From 41a4712b879f34a9e0c77d2449c5ada69f8be1d7 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 22 Nov 2024 14:09:20 +0100 Subject: [PATCH 01/42] Add topology validation to liquid service info requests --- internal/plugins/capacity_liquid.go | 4 ++++ internal/plugins/liquid.go | 4 ++++ internal/plugins/utils.go | 19 +++++++++++++++++++ 3 files changed, 27 insertions(+) diff --git a/internal/plugins/capacity_liquid.go b/internal/plugins/capacity_liquid.go index 059154c36..6ccfd82cd 100644 --- a/internal/plugins/capacity_liquid.go +++ b/internal/plugins/capacity_liquid.go @@ -72,6 +72,10 @@ func (p *liquidCapacityPlugin) Init(ctx context.Context, client *gophercloud.Pro return fmt.Errorf("cannot initialize ServiceClient for %s: %w", p.LiquidServiceType, err) } p.LiquidServiceInfo, err = p.LiquidClient.GetInfo(ctx) + if err != nil { + return err + } + err = checkResourceTopologies(p.LiquidServiceInfo) return err } diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index 23e600e38..b62106cfa 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -83,6 +83,10 @@ func (p *liquidQuotaPlugin) Init(ctx context.Context, client *gophercloud.Provid return fmt.Errorf("cannot initialize ServiceClient for liquid-%s: %w", serviceType, err) } p.LiquidServiceInfo, err = p.LiquidClient.GetInfo(ctx) + if err != nil { + return err + } + err = checkResourceTopologies(p.LiquidServiceInfo) return err } diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index 405b21ce9..ce1b8d3a8 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -19,6 +19,25 @@ package plugins +import ( + "fmt" + "github.com/sapcc/go-api-declarations/liquid" +) + func p2u64(val uint64) *uint64 { return &val } + +func checkResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { + invalidTopologies := map[liquid.ResourceName]liquid.ResourceTopology{} + resources := serviceInfo.Resources + for k, v := range resources { + if 
!v.Topology.IsValid() || v.Topology != "" { + invalidTopologies[k] = v.Topology + } + } + if len(invalidTopologies) > 0 { + return fmt.Errorf("invalid topologies detected: %v", invalidTopologies) + } + return +} From 97d77ceb6f48d60f498dfd95c0fc842147d922f8 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 22 Nov 2024 17:13:40 +0100 Subject: [PATCH 02/42] Add toplogy check against quota and capacity scrapes from liquids --- internal/plugins/capacity_liquid.go | 8 ++++++++ internal/plugins/liquid.go | 8 ++++++++ internal/plugins/utils.go | 22 ++++++++++++++++++++++ 3 files changed, 38 insertions(+) diff --git a/internal/plugins/capacity_liquid.go b/internal/plugins/capacity_liquid.go index 6ccfd82cd..463e76c68 100644 --- a/internal/plugins/capacity_liquid.go +++ b/internal/plugins/capacity_liquid.go @@ -94,6 +94,14 @@ func (p *liquidCapacityPlugin) Scrape(ctx context.Context, backchannel core.Capa logg.Fatal("ServiceInfo version for %s changed from %d to %d; restarting now to reload ServiceInfo...", p.LiquidServiceType, p.LiquidServiceInfo.Version, resp.InfoVersion) } + for resourceName, resource := range p.LiquidServiceInfo.Resources { + perAZ := resp.Resources[resourceName].PerAZ + toplogy := resource.Topology + err := matchLiquidReportToTopology(perAZ, toplogy) + if err != nil { + return nil, nil, err + } + } resultInService := make(map[liquid.ResourceName]core.PerAZ[core.CapacityData], len(p.LiquidServiceInfo.Resources)) for resName, resInfo := range p.LiquidServiceInfo.Resources { diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index b62106cfa..6814931a1 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -123,6 +123,14 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro logg.Fatal("ServiceInfo version for %s changed from %d to %d; restarting now to reload ServiceInfo...", p.LiquidServiceType, p.LiquidServiceInfo.Version, resp.InfoVersion) } + for resourceName, resource := range 
p.LiquidServiceInfo.Resources { + perAZ := resp.Resources[resourceName].PerAZ + toplogy := resource.Topology + err := matchLiquidReportToTopology(perAZ, toplogy) + if err != nil { + return nil, nil, err + } + } result = make(map[liquid.ResourceName]core.ResourceData, len(p.LiquidServiceInfo.Resources)) for resName := range p.LiquidServiceInfo.Resources { diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index ce1b8d3a8..e8ae16abd 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -21,6 +21,8 @@ package plugins import ( "fmt" + "reflect" + "github.com/sapcc/go-api-declarations/liquid" ) @@ -41,3 +43,23 @@ func checkResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { } return } + +func matchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone]*V, topology liquid.ResourceTopology) (err error) { + _, anyExists := perAZReport[liquid.AvailabilityZoneAny] + _, unknownExsits := perAZReport[liquid.AvailabilityZoneUnknown] + switch topology { + case liquid.FlatResourceTopology: + if len(perAZReport) == 1 && anyExists { + return + } + case liquid.AZAwareResourceTopology: + if len(perAZReport) > 0 && !anyExists { + return + } + case liquid.AZSeparatedResourceTopology: + if len(perAZReport) > 0 && !anyExists && !unknownExsits { + return + } + } + return fmt.Errorf("scrape with toplogy type: %v returned AZs: %v", topology, reflect.ValueOf(perAZReport).MapKeys()) +} From fd21fd437a71e56993f22866975ee03026eb1d13 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 22 Nov 2024 17:15:35 +0100 Subject: [PATCH 03/42] Fix typo --- internal/plugins/utils.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index e8ae16abd..f9ab49f4a 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -46,7 +46,7 @@ func checkResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { func matchLiquidReportToTopology[V any](perAZReport 
map[liquid.AvailabilityZone]*V, topology liquid.ResourceTopology) (err error) { _, anyExists := perAZReport[liquid.AvailabilityZoneAny] - _, unknownExsits := perAZReport[liquid.AvailabilityZoneUnknown] + _, unknownExists := perAZReport[liquid.AvailabilityZoneUnknown] switch topology { case liquid.FlatResourceTopology: if len(perAZReport) == 1 && anyExists { @@ -57,7 +57,7 @@ func matchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone] return } case liquid.AZSeparatedResourceTopology: - if len(perAZReport) > 0 && !anyExists && !unknownExsits { + if len(perAZReport) > 0 && !anyExists && !unknownExists { return } } From dedec021d361dbcbb2580544c81efe76d889af5a Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 22 Nov 2024 17:36:48 +0100 Subject: [PATCH 04/42] add more details to thrown errors during scrape --- internal/plugins/capacity_liquid.go | 2 +- internal/plugins/liquid.go | 2 +- internal/plugins/utils.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/plugins/capacity_liquid.go b/internal/plugins/capacity_liquid.go index 463e76c68..ceba78b0f 100644 --- a/internal/plugins/capacity_liquid.go +++ b/internal/plugins/capacity_liquid.go @@ -99,7 +99,7 @@ func (p *liquidCapacityPlugin) Scrape(ctx context.Context, backchannel core.Capa toplogy := resource.Topology err := matchLiquidReportToTopology(perAZ, toplogy) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("service: %s, resource: %s: %w", p.LiquidServiceType, resourceName, err) } } diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index 6814931a1..ebd705dc0 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -128,7 +128,7 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro toplogy := resource.Topology err := matchLiquidReportToTopology(perAZ, toplogy) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, 
resourceName, err) } } diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index f9ab49f4a..ad173ff5f 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -61,5 +61,5 @@ func matchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone] return } } - return fmt.Errorf("scrape with toplogy type: %v returned AZs: %v", topology, reflect.ValueOf(perAZReport).MapKeys()) + return fmt.Errorf("scrape with toplogy type: %s returned AZs: %v", topology, reflect.ValueOf(perAZReport).MapKeys()) } From 2498d7a12ee5a50b25f864b9317dda01f33327e9 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Mon, 25 Nov 2024 17:20:00 +0100 Subject: [PATCH 05/42] Add scrape tests --- internal/collector/scrape_test.go | 33 ++++++++++++++++++++++ internal/plugins/capacity_liquid.go | 4 +-- internal/plugins/liquid.go | 4 +-- internal/plugins/utils.go | 38 +++++++++++++++++++------- internal/test/plugins/quota_generic.go | 28 ++++++++++++++++--- 5 files changed, 89 insertions(+), 18 deletions(-) diff --git a/internal/collector/scrape_test.go b/internal/collector/scrape_test.go index d36ee371d..defa860e3 100644 --- a/internal/collector/scrape_test.go +++ b/internal/collector/scrape_test.go @@ -21,6 +21,7 @@ package collector import ( "database/sql" + "errors" "net/http" "regexp" "testing" @@ -582,3 +583,35 @@ func Test_ScrapeReturnsNoUsageData(t *testing.T) { scrapedAt.Unix(), scrapedAt.Add(scrapeInterval).Unix(), ) } + +func Test_TopologyScrapes(t *testing.T) { + s := test.NewSetup(t, + test.WithConfig(testScrapeBasicConfigYAML), + ) + prepareDomainsAndProjectsForScrape(t, s) + + c := getCollector(t, s) + job := c.ResourceScrapeJob(s.Registry) + withLabel := jobloop.WithLabel("service_type", "unittest") + plugin := s.Cluster.QuotaPlugins["unittest"].(*plugins.GenericQuotaPlugin) + + _, tr0 := easypg.NewTracker(t, s.DB.Db) + tr0.AssertEqualToFile("fixtures/scrape0.sql") + + var status *any + plugin.LiquidServiceInfo.Resources = 
map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.FlatResourceTopology}} + plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"az-one": status, "az-two": status} + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with toplogy type: flat returned AZs: [az-one az-two]")) + + plugin.LiquidServiceInfo.Resources["capacity"] = liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology} + plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"any": status} + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: service: unittest, resource: capacity: scrape with toplogy type: az-aware returned AZs: [any]")) + + s.Clock.StepBy(scrapeInterval) + plugin.LiquidServiceInfo.Resources["capacity"] = liquid.ResourceInfo{Topology: liquid.AZSeparatedResourceTopology} + plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"az-one": status, "unknown": status} + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with toplogy type: az-separated returned AZs: [az-one unknown]")) + + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: "invalidAZ1"}, "things": {Topology: "invalidAZ2"}} + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: invalid toplogy: invalidAZ1 on resource: capacity\ninvalid toplogy: invalidAZ2 on resource: things")) +} diff --git a/internal/plugins/capacity_liquid.go b/internal/plugins/capacity_liquid.go index ceba78b0f..3542ac049 100644 --- a/internal/plugins/capacity_liquid.go +++ b/internal/plugins/capacity_liquid.go @@ -75,7 +75,7 @@ func (p *liquidCapacityPlugin) Init(ctx context.Context, client *gophercloud.Pro if err != nil { return err } - err = 
checkResourceTopologies(p.LiquidServiceInfo) + err = CheckResourceTopologies(p.LiquidServiceInfo) return err } @@ -97,7 +97,7 @@ func (p *liquidCapacityPlugin) Scrape(ctx context.Context, backchannel core.Capa for resourceName, resource := range p.LiquidServiceInfo.Resources { perAZ := resp.Resources[resourceName].PerAZ toplogy := resource.Topology - err := matchLiquidReportToTopology(perAZ, toplogy) + err := MatchLiquidReportToTopology(perAZ, toplogy) if err != nil { return nil, nil, fmt.Errorf("service: %s, resource: %s: %w", p.LiquidServiceType, resourceName, err) } diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index ebd705dc0..fba19f7d6 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -86,7 +86,7 @@ func (p *liquidQuotaPlugin) Init(ctx context.Context, client *gophercloud.Provid if err != nil { return err } - err = checkResourceTopologies(p.LiquidServiceInfo) + err = CheckResourceTopologies(p.LiquidServiceInfo) return err } @@ -126,7 +126,7 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro for resourceName, resource := range p.LiquidServiceInfo.Resources { perAZ := resp.Resources[resourceName].PerAZ toplogy := resource.Topology - err := matchLiquidReportToTopology(perAZ, toplogy) + err := MatchLiquidReportToTopology(perAZ, toplogy) if err != nil { return nil, nil, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err) } diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index ad173ff5f..2e2e42c2a 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -20,8 +20,9 @@ package plugins import ( + "errors" "fmt" - "reflect" + "sort" "github.com/sapcc/go-api-declarations/liquid" ) @@ -30,21 +31,29 @@ func p2u64(val uint64) *uint64 { return &val } -func checkResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { - invalidTopologies := map[liquid.ResourceName]liquid.ResourceTopology{} +func 
CheckResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { + errs := []error{} resources := serviceInfo.Resources - for k, v := range resources { - if !v.Topology.IsValid() || v.Topology != "" { - invalidTopologies[k] = v.Topology + + var resourceNames []string + for resource := range resources { + resourceNames = append(resourceNames, string(resource)) + } + sort.Strings(resourceNames) + + for _, resourceName := range resourceNames { + topology := resources[liquid.ResourceName(resourceName)].Topology + if !topology.IsValid() { + errs = append(errs, fmt.Errorf("invalid toplogy: %s on resource: %s", topology, resourceName)) } } - if len(invalidTopologies) > 0 { - return fmt.Errorf("invalid topologies detected: %v", invalidTopologies) + if len(errs) > 0 { + return errors.Join(errs...) } return } -func matchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone]*V, topology liquid.ResourceTopology) (err error) { +func MatchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone]*V, topology liquid.ResourceTopology) (err error) { _, anyExists := perAZReport[liquid.AvailabilityZoneAny] _, unknownExists := perAZReport[liquid.AvailabilityZoneUnknown] switch topology { @@ -60,6 +69,15 @@ func matchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone] if len(perAZReport) > 0 && !anyExists && !unknownExists { return } + case "": + return } - return fmt.Errorf("scrape with toplogy type: %s returned AZs: %v", topology, reflect.ValueOf(perAZReport).MapKeys()) + + var reportedAZs []string + for az := range perAZReport { + reportedAZs = append(reportedAZs, string(az)) + } + sort.Strings(reportedAZs) + + return fmt.Errorf("scrape with toplogy type: %s returned AZs: %v", topology, reportedAZs) } diff --git a/internal/test/plugins/quota_generic.go b/internal/test/plugins/quota_generic.go index 7059ecfe9..898359257 100644 --- a/internal/test/plugins/quota_generic.go +++ b/internal/test/plugins/quota_generic.go @@ -34,6 +34,7 @@ 
import ( "github.com/sapcc/limes/internal/core" "github.com/sapcc/limes/internal/db" + "github.com/sapcc/limes/internal/plugins" ) func init() { @@ -45,15 +46,17 @@ func init() { // operations. type GenericQuotaPlugin struct { ServiceType db.ServiceType `yaml:"-"` + LiquidServiceInfo liquid.ServiceInfo `yaml:"-"` StaticRateInfos map[liquid.RateName]liquid.RateInfo `yaml:"rate_infos"` StaticResourceData map[liquid.ResourceName]*core.ResourceData `yaml:"-"` StaticResourceAttributes map[liquid.ResourceName]map[string]any `yaml:"-"` OverrideQuota map[string]map[liquid.ResourceName]uint64 `yaml:"-"` // first key is project UUID // behavior flags that can be set by a unit test - ScrapeFails bool `yaml:"-"` - SetQuotaFails bool `yaml:"-"` - MinQuota map[liquid.ResourceName]uint64 `yaml:"-"` - MaxQuota map[liquid.ResourceName]uint64 `yaml:"-"` + ReportedAZs map[liquid.AvailabilityZone]*any `yaml:"-"` + ScrapeFails bool `yaml:"-"` + SetQuotaFails bool `yaml:"-"` + MinQuota map[liquid.ResourceName]uint64 `yaml:"-"` + MaxQuota map[liquid.ResourceName]uint64 `yaml:"-"` } // Init implements the core.QuotaPlugin interface. 
@@ -160,6 +163,23 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr return nil, nil, errors.New("Scrape failed as requested") } + if len(p.LiquidServiceInfo.Resources) > 0 { + err := plugins.CheckResourceTopologies(p.LiquidServiceInfo) + if err != nil { + return nil, nil, err + } + } + + if len(p.ReportedAZs) > 0 { + for resourceName, resource := range p.LiquidServiceInfo.Resources { + toplogy := resource.Topology + err := plugins.MatchLiquidReportToTopology(p.ReportedAZs, toplogy) + if err != nil { + return nil, nil, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err) + } + } + } + result = make(map[liquid.ResourceName]core.ResourceData) for key, val := range p.StaticResourceData { copyOfVal := core.ResourceData{ From cf9aeffff7d1db3f8a53d07ef679df3aee62a365 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Mon, 25 Nov 2024 17:32:23 +0100 Subject: [PATCH 06/42] Skip check on empty topologies --- internal/plugins/utils.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index 2e2e42c2a..0f723e3e9 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -43,6 +43,9 @@ func CheckResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { for _, resourceName := range resourceNames { topology := resources[liquid.ResourceName(resourceName)].Topology + if topology == "" { + continue + } if !topology.IsValid() { errs = append(errs, fmt.Errorf("invalid toplogy: %s on resource: %s", topology, resourceName)) } From b37fab296c8b3db5b8ba88ac6b8322f302e0b227 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Tue, 26 Nov 2024 14:32:48 +0100 Subject: [PATCH 07/42] read and write az quota to backend_quota field implementation of AZSeparatedResourceTopology --- internal/collector/scrape.go | 6 ++++++ internal/core/data.go | 1 + internal/db/migrations.go | 8 ++++++++ internal/db/models.go | 1 + internal/plugins/liquid.go | 5 ++++- 5 files changed, 20 
insertions(+), 1 deletion(-) diff --git a/internal/collector/scrape.go b/internal/collector/scrape.go index f37f62d40..dd383503d 100644 --- a/internal/collector/scrape.go +++ b/internal/collector/scrape.go @@ -303,6 +303,12 @@ func (c *Collector) writeResourceScrapeResult(dbDomain db.Domain, dbProject db.P azRes.Usage = data.Usage azRes.PhysicalUsage = data.PhysicalUsage + // set AZ backend quota + resInfo := c.Cluster.InfoForResource(srv.Type, res.Name) + if resInfo.HasQuota && resInfo.Topology == liquid.AZSeparatedResourceTopology { + azRes.BackendQuota = &data.Quota + } + // warn when the backend is inconsistent with itself if data.Subresources != nil && uint64(len(data.Subresources)) != data.Usage { logg.Info("resource quantity mismatch in project %s, resource %s/%s, AZ %s: usage = %d, but found %d subresources", diff --git a/internal/core/data.go b/internal/core/data.go index 83a0fafbd..f06cf878e 100644 --- a/internal/core/data.go +++ b/internal/core/data.go @@ -190,6 +190,7 @@ func (r ResourceData) AddLocalizedUsage(az limes.AvailabilityZone, usage uint64) // UsageData contains usage data for a single project resource. // It appears in type ResourceData. 
type UsageData struct { + Quota int64 Usage uint64 PhysicalUsage *uint64 // only supported by some plugins Subresources []any // only if supported by plugin and enabled in config diff --git a/internal/db/migrations.go b/internal/db/migrations.go index a08ba7819..f536b5729 100644 --- a/internal/db/migrations.go +++ b/internal/db/migrations.go @@ -179,4 +179,12 @@ var sqlMigrations = map[string]string{ ALTER TABLE project_resources RENAME COLUMN max_quota_from_admin TO max_quota_from_outside_admin; `, + "046_service_specific_quota_constraints.down.sql": ` + ALTER TABLE project_az_resources + ADD backend_quota BIGINT default NULL; + `, + "046_service_specific_quota_constraints.up.sql": ` + ALTER TABLE project_az_resources + DROP COLUMN backend_quota; + `, } diff --git a/internal/db/models.go b/internal/db/models.go index 335f457e2..cd976ca9c 100644 --- a/internal/db/models.go +++ b/internal/db/models.go @@ -138,6 +138,7 @@ type ProjectAZResource struct { ResourceID ProjectResourceID `db:"resource_id"` AvailabilityZone limes.AvailabilityZone `db:"az"` Quota *uint64 `db:"quota"` + BackendQuota *int64 `db:"backend_quota"` Usage uint64 `db:"usage"` PhysicalUsage *uint64 `db:"physical_usage"` SubresourcesJSON string `db:"subresources"` diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index fba19f7d6..a17a3772f 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -133,7 +133,7 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro } result = make(map[liquid.ResourceName]core.ResourceData, len(p.LiquidServiceInfo.Resources)) - for resName := range p.LiquidServiceInfo.Resources { + for resName, resInfo := range p.LiquidServiceInfo.Resources { resReport := resp.Resources[resName] if resReport == nil { return nil, nil, fmt.Errorf("missing report for resource %q", resName) @@ -154,6 +154,9 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro PhysicalUsage: 
azReport.PhysicalUsage, Subresources: castSliceToAny(azReport.Subresources), } + if resInfo.Topology == liquid.AZSeparatedResourceTopology && azReport.Quota != nil { + resData.UsageData[az].Quota = *azReport.Quota + } } result[resName] = resData From d8be5e749fa0ed308f4f1d91de0793b497bc237b Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 29 Nov 2024 14:41:11 +0100 Subject: [PATCH 08/42] Add backend sync logic separated AZ quota will now sync the resource quota and AZ quota to the backend --- internal/collector/scrape.go | 8 +- internal/collector/scrape_test.go | 91 ++++++++++++++++++++- internal/collector/sync_quota_to_backend.go | 63 ++++++++++++-- internal/core/plugin.go | 7 +- internal/db/migrations.go | 8 +- internal/plugins/liquid.go | 6 +- internal/plugins/nova.go | 6 +- internal/test/plugins/quota_generic.go | 37 +++++---- internal/test/plugins/quota_noop.go | 2 +- main.go | 4 +- 10 files changed, 193 insertions(+), 39 deletions(-) diff --git a/internal/collector/scrape.go b/internal/collector/scrape.go index dd383503d..e660faadb 100644 --- a/internal/collector/scrape.go +++ b/internal/collector/scrape.go @@ -303,10 +303,12 @@ func (c *Collector) writeResourceScrapeResult(dbDomain db.Domain, dbProject db.P azRes.Usage = data.Usage azRes.PhysicalUsage = data.PhysicalUsage - // set AZ backend quota + // set AZ backend quota. Do not set backend quota to the automatically created any AZ. 
resInfo := c.Cluster.InfoForResource(srv.Type, res.Name) - if resInfo.HasQuota && resInfo.Topology == liquid.AZSeparatedResourceTopology { - azRes.BackendQuota = &data.Quota + if resInfo.Topology == liquid.AZSeparatedResourceTopology && resInfo.HasQuota { + if azRes.AvailabilityZone != liquid.AvailabilityZoneAny { + azRes.BackendQuota = &data.Quota + } } // warn when the backend is inconsistent with itself diff --git a/internal/collector/scrape_test.go b/internal/collector/scrape_test.go index defa860e3..9438cb0c4 100644 --- a/internal/collector/scrape_test.go +++ b/internal/collector/scrape_test.go @@ -593,25 +593,114 @@ func Test_TopologyScrapes(t *testing.T) { c := getCollector(t, s) job := c.ResourceScrapeJob(s.Registry) withLabel := jobloop.WithLabel("service_type", "unittest") + syncJob := c.SyncQuotaToBackendJob(s.Registry) plugin := s.Cluster.QuotaPlugins["unittest"].(*plugins.GenericQuotaPlugin) - _, tr0 := easypg.NewTracker(t, s.DB.Db) + tr, tr0 := easypg.NewTracker(t, s.DB.Db) tr0.AssertEqualToFile("fixtures/scrape0.sql") var status *any + // positive: Sync az-separated quota values with the backend + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZSeparatedResourceTopology}} + plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"az-one": status, "az-two": status} + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + + scrapedAt1 := s.Clock.Now().Add(-5 * time.Second) + scrapedAt2 := s.Clock.Now() + tr.DBChanges().AssertEqualf(` + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (1, 1, 'any', 0, '{"t":[%[1]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (10, 4, 'any', 0, '{"t":[%[3]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, 
backend_quota) VALUES (11, 4, 'az-one', 0, 0, '{"t":[%[3]d],"v":[0]}', 50); + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (12, 4, 'az-two', 0, 0, '{"t":[%[3]d],"v":[0]}', 50); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (13, 5, 'any', 0, '{"t":[%[3]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (14, 5, 'az-one', 0, '{"t":[%[3]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (15, 5, 'az-two', 0, '{"t":[%[3]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (16, 6, 'any', 0, '{"t":[%[3]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (17, 6, 'az-one', 2, '[{"index":0},{"index":1}]', '{"t":[%[3]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (18, 6, 'az-two', 2, '[{"index":2},{"index":3}]', '{"t":[%[3]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (2, 1, 'az-one', 0, 0, '{"t":[%[1]d],"v":[0]}', 50); + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (3, 1, 'az-two', 0, 0, '{"t":[%[1]d],"v":[0]}', 50); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (4, 2, 'any', 0, '{"t":[%[1]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (5, 2, 'az-one', 0, '{"t":[%[1]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (6, 2, 'az-two', 0, '{"t":[%[1]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, 
historical_usage) VALUES (7, 3, 'any', 0, '{"t":[%[1]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (8, 3, 'az-one', 2, '[{"index":0},{"index":1}]', '{"t":[%[1]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (9, 3, 'az-two', 2, '[{"index":2},{"index":3}]', '{"t":[%[1]d],"v":[2]}', 21); + INSERT INTO project_resources (id, service_id, name, quota, backend_quota) VALUES (1, 1, 'capacity', 0, 100); + INSERT INTO project_resources (id, service_id, name) VALUES (2, 1, 'capacity_portion'); + INSERT INTO project_resources (id, service_id, name, quota, backend_quota) VALUES (3, 1, 'things', 0, 42); + INSERT INTO project_resources (id, service_id, name, quota, backend_quota) VALUES (4, 2, 'capacity', 0, 100); + INSERT INTO project_resources (id, service_id, name) VALUES (5, 2, 'capacity_portion'); + INSERT INTO project_resources (id, service_id, name, quota, backend_quota) VALUES (6, 2, 'things', 0, 42); + UPDATE project_services SET scraped_at = %[1]d, scrape_duration_secs = 5, serialized_metrics = '{"capacity_usage":0,"things_usage":4}', checked_at = %[1]d, next_scrape_at = %[2]d, quota_desynced_at = %[1]d WHERE id = 1 AND project_id = 1 AND type = 'unittest'; + UPDATE project_services SET scraped_at = %[3]d, scrape_duration_secs = 5, serialized_metrics = '{"capacity_usage":0,"things_usage":4}', checked_at = %[3]d, next_scrape_at = %[4]d, quota_desynced_at = %[3]d WHERE id = 2 AND project_id = 2 AND type = 'unittest'; + `, + scrapedAt1.Unix(), scrapedAt1.Add(scrapeInterval).Unix(), + scrapedAt2.Unix(), scrapedAt2.Add(scrapeInterval).Unix(), + ) + + // set some quota acpq values. 
+ // resource level + _, err := s.DB.Exec(`UPDATE project_resources SET quota = $1 WHERE name = $2`, 20, "capacity") + if err != nil { + t.Fatal(err) + } + _, err = s.DB.Exec(`UPDATE project_resources SET quota = $1 WHERE name = $2`, 13, "things") + if err != nil { + t.Fatal(err) + } + // az level + _, err = s.DB.Exec(`UPDATE project_az_resources SET quota = $1 WHERE resource_id IN (1,4) and az != 'any'`, 20) + if err != nil { + t.Fatal(err) + } + _, err = s.DB.Exec(`UPDATE project_az_resources SET quota = $1 WHERE resource_id IN (3,6) and az != 'any'`, 13) + if err != nil { + t.Fatal(err) + } + tr.DBChanges().Ignore() + + mustT(t, syncJob.ProcessOne(s.Ctx, withLabel)) + mustT(t, syncJob.ProcessOne(s.Ctx, withLabel)) + + tr.DBChanges().AssertEqualf(` + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 11 AND resource_id = 4 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 12 AND resource_id = 4 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 17 AND resource_id = 6 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 18 AND resource_id = 6 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 2 AND resource_id = 1 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 3 AND resource_id = 1 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 8 AND resource_id = 3 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 9 AND resource_id = 3 AND az = 'az-two'; + UPDATE project_resources SET backend_quota = 20 WHERE id = 1 AND service_id = 1 AND name = 'capacity'; + UPDATE project_resources SET backend_quota = 13 WHERE id = 3 AND service_id = 1 AND name = 'things'; + UPDATE project_resources SET backend_quota = 20 WHERE id = 4 AND service_id = 2 AND name = 'capacity'; + UPDATE project_resources SET backend_quota = 13 WHERE id = 6 AND service_id = 2 
AND name = 'things'; + UPDATE project_services SET quota_desynced_at = NULL, quota_sync_duration_secs = 5 WHERE id = 1 AND project_id = 1 AND type = 'unittest'; + UPDATE project_services SET quota_desynced_at = NULL, quota_sync_duration_secs = 5 WHERE id = 2 AND project_id = 2 AND type = 'unittest'; + `) + + s.Clock.StepBy(scrapeInterval) + + // negative: scrape with flat topology returns invalid AZs plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.FlatResourceTopology}} plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"az-one": status, "az-two": status} mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with toplogy type: flat returned AZs: [az-one az-two]")) + // negative: scrape with az-aware toplogy returns invalid any AZ plugin.LiquidServiceInfo.Resources["capacity"] = liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology} plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"any": status} mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: service: unittest, resource: capacity: scrape with toplogy type: az-aware returned AZs: [any]")) s.Clock.StepBy(scrapeInterval) + // negative: scrape with az-separated toplogy returns invalid AZs any and unknown plugin.LiquidServiceInfo.Resources["capacity"] = liquid.ResourceInfo{Topology: liquid.AZSeparatedResourceTopology} plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"az-one": status, "unknown": status} mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with toplogy type: az-separated returned AZs: [az-one unknown]")) + // negative: reject liquid initialization with invalid topologies plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": 
{Topology: "invalidAZ1"}, "things": {Topology: "invalidAZ2"}} mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: invalid toplogy: invalidAZ1 on resource: capacity\ninvalid toplogy: invalidAZ2 on resource: things")) + } diff --git a/internal/collector/sync_quota_to_backend.go b/internal/collector/sync_quota_to_backend.go index 65643c13e..6c1cd5daa 100644 --- a/internal/collector/sync_quota_to_backend.go +++ b/internal/collector/sync_quota_to_backend.go @@ -93,15 +93,30 @@ func (c *Collector) processQuotaSyncTask(ctx context.Context, srv db.ProjectServ var ( quotaSyncSelectQuery = sqlext.SimplifyWhitespace(` - SELECT name, backend_quota, quota + SELECT id, name, backend_quota, quota FROM project_resources WHERE service_id = $1 AND quota IS NOT NULL `) + azQuotaSyncSelectQuery = sqlext.SimplifyWhitespace(` + SELECT az, backend_quota, quota + FROM project_az_resources + WHERE resource_id = $1 AND quota IS NOT NULL + `) quotaSyncMarkResourcesAsAppliedQuery = sqlext.SimplifyWhitespace(` UPDATE project_resources SET backend_quota = quota WHERE service_id = $1 `) + azQuotaSyncMarkResourcesAsAppliedQuery = sqlext.SimplifyWhitespace(` + WITH resourceIDs as ( + SELECT id + FROM project_resources + WHERE service_id = $1 + ) + UPDATE project_az_resources + SET backend_quota = quota + WHERE resource_id in (SELECT id from resourceIDs) + `) quotaSyncMarkServiceAsAppliedQuery = sqlext.SimplifyWhitespace(` UPDATE project_services SET quota_desynced_at = NULL, quota_sync_duration_secs = $2 @@ -123,14 +138,17 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, // collect backend quota values that we want to apply targetQuotasInDB := make(map[liquid.ResourceName]uint64) + targetAZQuotasInDB := make(map[liquid.ResourceName]map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest) needsApply := false + azSeparatedNeedsApply := false err := sqlext.ForeachRow(c.DB, quotaSyncSelectQuery, 
[]any{srv.ID}, func(rows *sql.Rows) error { var ( + resourceID db.ProjectResourceID resourceName liquid.ResourceName currentQuota *int64 targetQuota uint64 ) - err := rows.Scan(&resourceName, ¤tQuota, &targetQuota) + err := rows.Scan(&resourceID, &resourceName, ¤tQuota, &targetQuota) if err != nil { return err } @@ -138,21 +156,50 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, if currentQuota == nil || *currentQuota < 0 || uint64(*currentQuota) != targetQuota { needsApply = true } + + resInfo := c.Cluster.InfoForResource(srv.Type, resourceName) + if resInfo.Topology != liquid.AZSeparatedResourceTopology { + return nil + } + err = sqlext.ForeachRow(c.DB, azQuotaSyncSelectQuery, []any{resourceID}, func(rows *sql.Rows) error { + var ( + availabilityZone liquid.AvailabilityZone + currentAZQuota *int64 + targetAZQuota uint64 + ) + err := rows.Scan(&availabilityZone, ¤tAZQuota, &targetAZQuota) + if err != nil { + return err + } + // defense in depth: configured backend_quota for AZ any or unknown are not valid for the azSeparatedQuota topology. 
+ if (availabilityZone == liquid.AvailabilityZoneAny || availabilityZone == liquid.AvailabilityZoneUnknown) && currentAZQuota != nil { + return fmt.Errorf("detected invalid AZ: %s for resource: %s with topology: %s has backend_quota: %v", availabilityZone, resourceName, resInfo.Topology, currentAZQuota) + } + targetAZQuotasInDB[resourceName] = make(map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest) + targetAZQuotasInDB[resourceName][availabilityZone] = liquid.AZResourceQuotaRequest{Quota: targetAZQuota} + if currentAZQuota == nil || *currentAZQuota < 0 || uint64(*currentAZQuota) != targetAZQuota { + azSeparatedNeedsApply = true + } + return nil + }) + if err != nil { + return err + } return nil }) if err != nil { return fmt.Errorf("while collecting target quota values for %s backend: %w", srv.Type, err) } - if needsApply { + if needsApply || azSeparatedNeedsApply { // double-check that we only include quota values for resources that the backend currently knows about - targetQuotasForBackend := make(map[liquid.ResourceName]uint64) + targetQuotasForBackend := make(map[liquid.ResourceName]core.Quotas) for resName, resInfo := range plugin.Resources() { if !resInfo.HasQuota { continue } //NOTE: If `targetQuotasInDB` does not have an entry for this resource, we will write 0 into the backend. 
- targetQuotasForBackend[resName] = targetQuotasInDB[resName] + targetQuotasForBackend[resName] = core.Quotas{QuotaForResource: targetQuotasInDB[resName], QuotasForAZs: targetAZQuotasInDB[resName]} } // apply quotas in backend @@ -172,6 +219,12 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, if err != nil { return err } + if azSeparatedNeedsApply { + _, err = c.DB.Exec(azQuotaSyncMarkResourcesAsAppliedQuery, srv.ID) + if err != nil { + return err + } + } } finishedAt := c.MeasureTimeAtEnd() diff --git a/internal/core/plugin.go b/internal/core/plugin.go index 218b38bf8..316cde88c 100644 --- a/internal/core/plugin.go +++ b/internal/core/plugin.go @@ -141,7 +141,7 @@ type QuotaPlugin interface { // SetQuota updates the backend service's quotas for the given project in the // given domain to the values specified here. The map is guaranteed to contain // values for all resources defined by Resources(). - SetQuota(ctx context.Context, project KeystoneProject, quotas map[liquid.ResourceName]uint64) error + SetQuota(ctx context.Context, project KeystoneProject, quotas map[liquid.ResourceName]Quotas) error // Rates returns metadata for all the rates that this plugin scrapes // from the backend service. @@ -181,6 +181,11 @@ type QuotaPlugin interface { CollectMetrics(ch chan<- prometheus.Metric, project KeystoneProject, serializedMetrics []byte) error } +type Quotas struct { + QuotaForResource uint64 + QuotasForAZs map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest +} + // ServiceInfo is a reduced version of type limes.ServiceInfo, suitable for // being returned from func QuotaPlugin.ServiceInfo(). 
type ServiceInfo struct { diff --git a/internal/db/migrations.go b/internal/db/migrations.go index f536b5729..ea5659fbb 100644 --- a/internal/db/migrations.go +++ b/internal/db/migrations.go @@ -179,12 +179,12 @@ var sqlMigrations = map[string]string{ ALTER TABLE project_resources RENAME COLUMN max_quota_from_admin TO max_quota_from_outside_admin; `, - "046_service_specific_quota_constraints.down.sql": ` + "046_az_backend_quota.down.sql": ` ALTER TABLE project_az_resources - ADD backend_quota BIGINT default NULL; + DROP COLUMN backend_quota; `, - "046_service_specific_quota_constraints.up.sql": ` + "046_az_backend_quota.up.sql": ` ALTER TABLE project_az_resources - DROP COLUMN backend_quota; + ADD COLUMN backend_quota BIGINT default NULL; `, } diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index a17a3772f..99c59801b 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -189,12 +189,12 @@ func castSliceToAny[T any](input []T) (output []any) { } // SetQuota implements the core.QuotaPlugin interface. 
-func (p *liquidQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]uint64) error { +func (p *liquidQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]core.Quotas) error { req := liquid.ServiceQuotaRequest{ Resources: make(map[liquid.ResourceName]liquid.ResourceQuotaRequest, len(quotas)), } - for resName, quota := range quotas { - req.Resources[resName] = liquid.ResourceQuotaRequest{Quota: quota} + for resName, quotas := range quotas { + req.Resources[resName] = liquid.ResourceQuotaRequest{Quota: quotas.QuotaForResource, PerAZ: quotas.QuotasForAZs} } if p.LiquidServiceInfo.QuotaUpdateNeedsProjectMetadata { req.ProjectMetadata = project.ForLiquid() diff --git a/internal/plugins/nova.go b/internal/plugins/nova.go index 81df22afb..6b0eee8e7 100644 --- a/internal/plugins/nova.go +++ b/internal/plugins/nova.go @@ -345,11 +345,11 @@ func (p *novaPlugin) pooledResourceName(hwVersion string, base liquid.ResourceNa } // SetQuota implements the core.QuotaPlugin interface. 
-func (p *novaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]uint64) error { +func (p *novaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]core.Quotas) error { // translate Limes resource names for separate instance quotas into Nova quota names novaQuotas := make(novaQuotaUpdateOpts, len(quotas)) - for resourceName, quota := range quotas { - novaQuotas[string(resourceName)] = quota + for resourceName, quotas := range quotas { + novaQuotas[string(resourceName)] = quotas.QuotaForResource } return quotasets.Update(ctx, p.NovaV2, project.UUID, novaQuotas).Err diff --git a/internal/test/plugins/quota_generic.go b/internal/test/plugins/quota_generic.go index 898359257..9479439ff 100644 --- a/internal/test/plugins/quota_generic.go +++ b/internal/test/plugins/quota_generic.go @@ -45,12 +45,12 @@ func init() { // mostly reports static data and offers several controls to simulate failed // operations. 
type GenericQuotaPlugin struct { - ServiceType db.ServiceType `yaml:"-"` - LiquidServiceInfo liquid.ServiceInfo `yaml:"-"` - StaticRateInfos map[liquid.RateName]liquid.RateInfo `yaml:"rate_infos"` - StaticResourceData map[liquid.ResourceName]*core.ResourceData `yaml:"-"` - StaticResourceAttributes map[liquid.ResourceName]map[string]any `yaml:"-"` - OverrideQuota map[string]map[liquid.ResourceName]uint64 `yaml:"-"` // first key is project UUID + ServiceType db.ServiceType `yaml:"-"` + LiquidServiceInfo liquid.ServiceInfo `yaml:"-"` + StaticRateInfos map[liquid.RateName]liquid.RateInfo `yaml:"rate_infos"` + StaticResourceData map[liquid.ResourceName]*core.ResourceData `yaml:"-"` + StaticResourceAttributes map[liquid.ResourceName]map[string]any `yaml:"-"` + OverrideQuota map[string]map[liquid.ResourceName]core.Quotas `yaml:"-"` // first key is project UUID // behavior flags that can be set by a unit test ReportedAZs map[liquid.AvailabilityZone]*any `yaml:"-"` ScrapeFails bool `yaml:"-"` @@ -66,19 +66,19 @@ func (p *GenericQuotaPlugin) Init(ctx context.Context, provider *gophercloud.Pro "things": { Quota: 42, UsageData: core.PerAZ[core.UsageData]{ - "az-one": {Usage: 2}, - "az-two": {Usage: 2}, + "az-one": {Usage: 2, Quota: 21}, + "az-two": {Usage: 2, Quota: 21}, }, }, "capacity": { Quota: 100, UsageData: core.PerAZ[core.UsageData]{ - "az-one": {Usage: 0}, - "az-two": {Usage: 0}, + "az-one": {Usage: 0, Quota: 50}, + "az-two": {Usage: 0, Quota: 50}, }, }, } - p.OverrideQuota = make(map[string]map[liquid.ResourceName]uint64) + p.OverrideQuota = make(map[string]map[liquid.ResourceName]core.Quotas) return nil } @@ -98,9 +98,9 @@ func (p *GenericQuotaPlugin) ServiceInfo() core.ServiceInfo { // Resources implements the core.QuotaPlugin interface. 
func (p *GenericQuotaPlugin) Resources() map[liquid.ResourceName]liquid.ResourceInfo { result := map[liquid.ResourceName]liquid.ResourceInfo{ - "capacity": {Unit: limes.UnitBytes, HasQuota: true}, - "capacity_portion": {Unit: limes.UnitBytes, HasQuota: false}, // NOTE: This used to be `ContainedIn: "capacity"` before we removed support for this relation. - "things": {Unit: limes.UnitNone, HasQuota: true}, + "capacity": {Unit: limes.UnitBytes, HasQuota: true, Topology: p.LiquidServiceInfo.Resources["capacity"].Topology}, + "capacity_portion": {Unit: limes.UnitBytes, HasQuota: false, Topology: p.LiquidServiceInfo.Resources["capacity_portion"].Topology}, // NOTE: This used to be `ContainedIn: "capacity"` before we removed support for this relation. + "things": {Unit: limes.UnitNone, HasQuota: true, Topology: p.LiquidServiceInfo.Resources["things"].Topology}, } for resName, resInfo := range result { @@ -187,6 +187,11 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr UsageData: val.UsageData.Clone(), } + // populate azSeparatedQuota + for az, data := range copyOfVal.UsageData { + data.Quota = val.UsageData[az].Quota + } + // test coverage for PhysicalUsage != Usage if key == "capacity" { for _, data := range copyOfVal.UsageData { @@ -221,7 +226,7 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr if exists { for resourceName, quota := range data { resData := result[resourceName] - resData.Quota = int64(quota) //nolint:gosec // uint64 -> int64 would only fail if quota is bigger than 2^63 + resData.Quota = int64(quota.QuotaForResource) //nolint:gosec // uint64 -> int64 would only fail if quota is bigger than 2^63 result[resourceName] = resData } } @@ -249,7 +254,7 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr } // SetQuota implements the core.QuotaPlugin interface. 
-func (p *GenericQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]uint64) error { +func (p *GenericQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]core.Quotas) error { if p.SetQuotaFails { return errors.New("SetQuota failed as requested") } diff --git a/internal/test/plugins/quota_noop.go b/internal/test/plugins/quota_noop.go index c2ce8f9c9..27d36e211 100644 --- a/internal/test/plugins/quota_noop.go +++ b/internal/test/plugins/quota_noop.go @@ -120,6 +120,6 @@ func (p *NoopQuotaPlugin) BuildServiceUsageRequest(project core.KeystoneProject, } // SetQuota implements the core.QuotaPlugin interface. -func (p *NoopQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]uint64) error { +func (p *NoopQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]core.Quotas) error { return nil } diff --git a/main.go b/main.go index f86f7a746..2632fa11a 100644 --- a/main.go +++ b/main.go @@ -453,7 +453,7 @@ func taskTestSetQuota(ctx context.Context, cluster *core.Cluster, args []string) project := must.Return(findProjectForTesting(ctx, cluster, args[0])) quotaValueRx := regexp.MustCompile(`^([^=]+)=(\d+)$`) - quotaValues := make(map[liquid.ResourceName]uint64) + quotaValues := make(map[liquid.ResourceName]core.Quotas) for _, arg := range args[2:] { match := quotaValueRx.FindStringSubmatch(arg) if match == nil { @@ -463,7 +463,7 @@ func taskTestSetQuota(ctx context.Context, cluster *core.Cluster, args []string) if err != nil { logg.Fatal(err.Error()) } - quotaValues[liquid.ResourceName(match[1])] = val + quotaValues[liquid.ResourceName(match[1])] = core.Quotas{QuotaForResource: val} } must.Succeed(cluster.QuotaPlugins[serviceType].SetQuota(ctx, project, quotaValues)) From e9e681ec4a1a39a49e17a8af0646b4693a9dd4fb Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 29 Nov 2024 
15:54:42 +0100 Subject: [PATCH 09/42] Fix linter --- internal/collector/scrape_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/collector/scrape_test.go b/internal/collector/scrape_test.go index 9438cb0c4..3e7af5655 100644 --- a/internal/collector/scrape_test.go +++ b/internal/collector/scrape_test.go @@ -702,5 +702,4 @@ func Test_TopologyScrapes(t *testing.T) { // negative: reject liquid initialization with invalid topologies plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: "invalidAZ1"}, "things": {Topology: "invalidAZ2"}} mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: invalid toplogy: invalidAZ1 on resource: capacity\ninvalid toplogy: invalidAZ2 on resource: things")) - } From b901e1cb95a9f57ff195cf03650ac75998f3c8f0 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 29 Nov 2024 17:06:19 +0100 Subject: [PATCH 10/42] Add az backend_quota reset if topology changes from separated to az aware --- internal/collector/scrape.go | 13 +++++++++++++ internal/collector/scrape_test.go | 20 ++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/internal/collector/scrape.go b/internal/collector/scrape.go index e660faadb..af9722961 100644 --- a/internal/collector/scrape.go +++ b/internal/collector/scrape.go @@ -64,6 +64,12 @@ var ( WHERE pr.service_id = $1 `) + resetAZBackendQuotas = sqlext.SimplifyWhitespace(` + UPDATE project_az_resources + SET backend_quota = NULL + WHERE resource_id = $1 and backend_quota IS NOT NULL + `) + writeResourceScrapeSuccessQuery = sqlext.SimplifyWhitespace(` UPDATE project_services SET -- timing information @@ -310,6 +316,13 @@ func (c *Collector) writeResourceScrapeResult(dbDomain db.Domain, dbProject db.P azRes.BackendQuota = &data.Quota } } + // reset backendQuota entries for topology changes + if resInfo.Topology != liquid.AZSeparatedResourceTopology { + _, err = c.DB.Exec(resetAZBackendQuotas, 
azRes.ResourceID) + if err != nil { + return fmt.Errorf("AZ backend quota reset for resource %s failed", resourceName) + } + } // warn when the backend is inconsistent with itself if data.Subresources != nil && uint64(len(data.Subresources)) != data.Usage { diff --git a/internal/collector/scrape_test.go b/internal/collector/scrape_test.go index 3e7af5655..5b7ff21a2 100644 --- a/internal/collector/scrape_test.go +++ b/internal/collector/scrape_test.go @@ -683,6 +683,26 @@ func Test_TopologyScrapes(t *testing.T) { s.Clock.StepBy(scrapeInterval) + // topology of a resource changes. Reset AZ-separated backend_quota + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZAwareResourceTopology}} + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + + tr.DBChanges().AssertEqualf(` + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 11 AND resource_id = 4 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 12 AND resource_id = 4 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = NULL WHERE id = 17 AND resource_id = 6 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = NULL WHERE id = 18 AND resource_id = 6 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 2 AND resource_id = 1 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 3 AND resource_id = 1 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = NULL WHERE id = 8 AND resource_id = 3 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = NULL WHERE id = 9 AND resource_id = 3 AND az = 'az-two'; + UPDATE project_services SET scraped_at = 1825, checked_at = 1825, next_scrape_at = 3625 WHERE id = 1 AND project_id = 1 AND type = 'unittest'; + UPDATE project_services SET scraped_at = 1830, checked_at = 
1830, next_scrape_at = 3630 WHERE id = 2 AND project_id = 2 AND type = 'unittest'; + `) + + s.Clock.StepBy(scrapeInterval) + // negative: scrape with flat topology returns invalid AZs plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.FlatResourceTopology}} plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"az-one": status, "az-two": status} From 5f5386f7c825fbeb541e71a229614197747730d8 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Mon, 2 Dec 2024 11:33:21 +0100 Subject: [PATCH 11/42] Simplify backendquota reset on non AZ separated resources --- internal/collector/scrape.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/internal/collector/scrape.go b/internal/collector/scrape.go index af9722961..1763fbb5c 100644 --- a/internal/collector/scrape.go +++ b/internal/collector/scrape.go @@ -64,12 +64,6 @@ var ( WHERE pr.service_id = $1 `) - resetAZBackendQuotas = sqlext.SimplifyWhitespace(` - UPDATE project_az_resources - SET backend_quota = NULL - WHERE resource_id = $1 and backend_quota IS NOT NULL - `) - writeResourceScrapeSuccessQuery = sqlext.SimplifyWhitespace(` UPDATE project_services SET -- timing information @@ -318,10 +312,7 @@ func (c *Collector) writeResourceScrapeResult(dbDomain db.Domain, dbProject db.P } // reset backendQuota entries for topology changes if resInfo.Topology != liquid.AZSeparatedResourceTopology { - _, err = c.DB.Exec(resetAZBackendQuotas, azRes.ResourceID) - if err != nil { - return fmt.Errorf("AZ backend quota reset for resource %s failed", resourceName) - } + azRes.BackendQuota = nil } // warn when the backend is inconsistent with itself From 1c41b2fb24eed7655b5970d5b928f303f1e15688 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Mon, 2 Dec 2024 17:45:50 +0100 Subject: [PATCH 12/42] Add error report for all reported resources Add subfunction to sort map keys to keep the order of errors uniform --- internal/collector/scrape_test.go | 6 ++++++ 
internal/plugins/capacity_liquid.go | 13 +++++++++---- internal/plugins/liquid.go | 13 +++++++++---- internal/plugins/utils.go | 17 +++++++++++------ internal/test/plugins/quota_generic.go | 11 ++++++++--- 5 files changed, 43 insertions(+), 17 deletions(-) diff --git a/internal/collector/scrape_test.go b/internal/collector/scrape_test.go index 5b7ff21a2..726fa5117 100644 --- a/internal/collector/scrape_test.go +++ b/internal/collector/scrape_test.go @@ -722,4 +722,10 @@ func Test_TopologyScrapes(t *testing.T) { // negative: reject liquid initialization with invalid topologies plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: "invalidAZ1"}, "things": {Topology: "invalidAZ2"}} mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: invalid toplogy: invalidAZ1 on resource: capacity\ninvalid toplogy: invalidAZ2 on resource: things")) + + s.Clock.StepBy(scrapeInterval) + // negative: multiple resources with mismatching topology to AZ responses + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZSeparatedResourceTopology}} + plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"unknown": status} + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with toplogy type: az-separated returned AZs: [unknown]\nservice: unittest, resource: things: scrape with toplogy type: az-separated returned AZs: [unknown]")) } diff --git a/internal/plugins/capacity_liquid.go b/internal/plugins/capacity_liquid.go index 3542ac049..79c04f354 100644 --- a/internal/plugins/capacity_liquid.go +++ b/internal/plugins/capacity_liquid.go @@ -94,12 +94,17 @@ func (p *liquidCapacityPlugin) Scrape(ctx context.Context, backchannel core.Capa logg.Fatal("ServiceInfo version for 
%s changed from %d to %d; restarting now to reload ServiceInfo...", p.LiquidServiceType, p.LiquidServiceInfo.Version, resp.InfoVersion) } - for resourceName, resource := range p.LiquidServiceInfo.Resources { - perAZ := resp.Resources[resourceName].PerAZ - toplogy := resource.Topology + resourceNames := SortMapKeys(p.LiquidServiceInfo.Resources) + for _, resourceName := range resourceNames { + errs := []error{} + perAZ := resp.Resources[liquid.ResourceName(resourceName)].PerAZ + toplogy := p.LiquidServiceInfo.Resources[liquid.ResourceName(resourceName)].Topology err := MatchLiquidReportToTopology(perAZ, toplogy) if err != nil { - return nil, nil, fmt.Errorf("service: %s, resource: %s: %w", p.LiquidServiceType, resourceName, err) + errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err)) + } + if len(errs) > 0 { + return nil, nil, errors.Join(errs...) } } diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index 99c59801b..4b9b6a9d8 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -123,12 +123,17 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro logg.Fatal("ServiceInfo version for %s changed from %d to %d; restarting now to reload ServiceInfo...", p.LiquidServiceType, p.LiquidServiceInfo.Version, resp.InfoVersion) } - for resourceName, resource := range p.LiquidServiceInfo.Resources { - perAZ := resp.Resources[resourceName].PerAZ - toplogy := resource.Topology + resourceNames := SortMapKeys(p.LiquidServiceInfo.Resources) + for _, resourceName := range resourceNames { + errs := []error{} + perAZ := resp.Resources[liquid.ResourceName(resourceName)].PerAZ + toplogy := p.LiquidServiceInfo.Resources[liquid.ResourceName(resourceName)].Topology err := MatchLiquidReportToTopology(perAZ, toplogy) if err != nil { - return nil, nil, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err) + errs = append(errs, fmt.Errorf("service: %s, 
resource: %s: %w", p.ServiceType, resourceName, err)) + } + if len(errs) > 0 { + return nil, nil, errors.Join(errs...) } } diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index 0f723e3e9..1b8b238c0 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -31,16 +31,21 @@ func p2u64(val uint64) *uint64 { return &val } +func SortMapKeys[M map[K]V, K ~string, V any](mapToSort M) []string { + var sortedKeys []string + for key := range mapToSort { + sortedKeys = append(sortedKeys, string(key)) + } + sort.Strings(sortedKeys) + + return sortedKeys +} + func CheckResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { errs := []error{} resources := serviceInfo.Resources - var resourceNames []string - for resource := range resources { - resourceNames = append(resourceNames, string(resource)) - } - sort.Strings(resourceNames) - + resourceNames := SortMapKeys(resources) for _, resourceName := range resourceNames { topology := resources[liquid.ResourceName(resourceName)].Topology if topology == "" { diff --git a/internal/test/plugins/quota_generic.go b/internal/test/plugins/quota_generic.go index 9479439ff..4a37444f0 100644 --- a/internal/test/plugins/quota_generic.go +++ b/internal/test/plugins/quota_generic.go @@ -171,13 +171,18 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr } if len(p.ReportedAZs) > 0 { - for resourceName, resource := range p.LiquidServiceInfo.Resources { - toplogy := resource.Topology + errs := []error{} + resourceNames := plugins.SortMapKeys(p.LiquidServiceInfo.Resources) + for _, resourceName := range resourceNames { + toplogy := p.LiquidServiceInfo.Resources[liquid.ResourceName(resourceName)].Topology err := plugins.MatchLiquidReportToTopology(p.ReportedAZs, toplogy) if err != nil { - return nil, nil, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err) + errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, 
err)) } } + if len(errs) > 0 { + return nil, nil, errors.Join(errs...) + } } result = make(map[liquid.ResourceName]core.ResourceData) From eabce421905cd9a05f41554ec4382f5bf0d4ce2d Mon Sep 17 00:00:00 2001 From: VoigtS Date: Mon, 2 Dec 2024 18:02:41 +0100 Subject: [PATCH 13/42] let all util functions use the map key sort --- internal/plugins/utils.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index 1b8b238c0..aff8d67b7 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -81,11 +81,6 @@ func MatchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone] return } - var reportedAZs []string - for az := range perAZReport { - reportedAZs = append(reportedAZs, string(az)) - } - sort.Strings(reportedAZs) - + reportedAZs := SortMapKeys(perAZReport) return fmt.Errorf("scrape with toplogy type: %s returned AZs: %v", topology, reportedAZs) } From 27c020a07ffca42fedf9735b3de8768ffe5a07dd Mon Sep 17 00:00:00 2001 From: VoigtS Date: Tue, 3 Dec 2024 19:32:56 +0100 Subject: [PATCH 14/42] cleanup: replace configOption with topology checks Remove config option CommitmentIsAZAware with resource topology checks. 
Modify unit tests to include toplogy configs where necessary --- docs/operators/config.md | 3 +-- internal/api/api_test.go | 4 ---- internal/api/commitment.go | 4 +++- internal/api/commitment_test.go | 19 +++++++++---------- internal/collector/capacity_scrape.go | 3 ++- internal/collector/capacity_scrape_test.go | 2 +- internal/collector/commitment_cleanup_test.go | 2 +- internal/core/resource_behavior.go | 4 ---- 8 files changed, 17 insertions(+), 24 deletions(-) diff --git a/docs/operators/config.md b/docs/operators/config.md index 8eea12f02..53c2e3e05 100644 --- a/docs/operators/config.md +++ b/docs/operators/config.md @@ -132,7 +132,6 @@ Some special behaviors for resources can be configured in the `resource_behavior | `resource_behavior[].resource` | yes | Must contain a regex. The behavior entry applies to all resources where this regex matches against a slash-concatenated pair of service type and resource name. The anchors `^` and `$` are implied at both ends, so the regex must match the entire phrase. | | `resource_behavior[].overcommit_factor` | no | If given, capacity for matching resources will be computed as `raw_capacity * overcommit_factor`, where `raw_capacity` is what the capacity plugin reports. | | `resource_behavior[].commitment_durations` | no | If given, commitments for this resource can be created with any of the given durations. The duration format is the same as in the `commitments[].duration` attribute that appears on the resource API. If empty, this resource does not accept commitments. | -| `resource_behavior[].commitment_is_az_aware` | no | If true, commitments for this resource must be created in a specific AZ (i.e. not in a pseudo-AZ). If false, commitments for this resource must be created in the pseudo-AZ `any`. Ignored if `commitment_durations` is empty. | | `resource_behavior[].commitment_min_confirm_date` | no | If given, commitments for this resource will always be created with `confirm_by` no earlier than this timestamp. 
This can be used to plan the introduction of commitments on a specific date. Ignored if `commitment_durations` is empty. | | `resource_behavior[].commitment_until_percent` | no | If given, commitments for this resource will only be confirmed while the total of all confirmed commitments or uncommitted usage in the respective AZ is smaller than the respective percentage of the total capacity for that AZ. This is intended to provide a reserved buffer for the growth quota configured by `quota_distribution_configs[].autogrow.growth_multiplier`. Defaults to 100, i.e. all capacity is committable. | | `resource_behavior[].commitment_conversion.identifier` | no | If given, must contain a string. Commitments for this resource will then be allowed to be converted into commitments for all resources that set the same conversion identifier. | @@ -147,7 +146,7 @@ resource_behavior: # matches both sharev2/share_capacity and sharev2/snapshot_capacity - { resource: sharev2/.*_capacity, overcommit_factor: 2 } # starting in 2024, offer commitments for Cinder storage - - { resource: volumev2/capacity, commitment_durations: [ 1 year, 2 years, 3 years ], commitment_is_az_aware: true, commitment_min_confirm_date: 2024-01-01T00:00:00Z } + - { resource: volumev2/capacity, commitment_durations: [ 1 year, 2 years, 3 years ], commitment_min_confirm_date: 2024-01-01T00:00:00Z } # an Ironic flavor has been renamed from "thebigbox" to "baremetal_large" - { resource: compute/instances_baremetal_large, identity_in_v1_api: compute/instances_thebigbox } ``` diff --git a/internal/api/api_test.go b/internal/api/api_test.go index 4d2d75e7b..21a021c6e 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -106,10 +106,6 @@ const ( - resource: 'shared/(capacity|things)$' commitment_durations: ["1 hour", "2 hours"] commitment_min_confirm_date: '1970-01-08T00:00:00Z' # one week after start of mock.Clock - - resource: 'shared/capacity$' - commitment_is_az_aware: true - - resource: 
shared/things - commitment_is_az_aware: false ` ) diff --git a/internal/api/commitment.go b/internal/api/commitment.go index 992685220..ff60da203 100644 --- a/internal/api/commitment.go +++ b/internal/api/commitment.go @@ -32,6 +32,7 @@ import ( "github.com/sapcc/go-api-declarations/cadf" "github.com/sapcc/go-api-declarations/limes" limesresources "github.com/sapcc/go-api-declarations/limes/resources" + "github.com/sapcc/go-api-declarations/liquid" "github.com/sapcc/go-bits/audittools" "github.com/sapcc/go-bits/gopherpolicy" "github.com/sapcc/go-bits/httpapi" @@ -234,11 +235,12 @@ func (p *v1Provider) parseAndValidateCommitmentRequest(w http.ResponseWriter, r return nil, nil, nil } behavior := p.Cluster.BehaviorForResource(dbServiceType, dbResourceName) + resInfo := p.Cluster.InfoForResource(dbServiceType, dbResourceName) if len(behavior.CommitmentDurations) == 0 { http.Error(w, "commitments are not enabled for this resource", http.StatusUnprocessableEntity) return nil, nil, nil } - if behavior.CommitmentIsAZAware { + if resInfo.Topology != liquid.FlatResourceTopology { if !slices.Contains(p.Cluster.Config.AvailabilityZones, req.AvailabilityZone) { http.Error(w, "no such availability zone", http.StatusUnprocessableEntity) return nil, nil, nil diff --git a/internal/api/commitment_test.go b/internal/api/commitment_test.go index 3190da007..acc95c768 100644 --- a/internal/api/commitment_test.go +++ b/internal/api/commitment_test.go @@ -25,10 +25,12 @@ import ( "testing" "time" + "github.com/sapcc/go-api-declarations/liquid" "github.com/sapcc/go-bits/assert" "github.com/sapcc/limes/internal/db" "github.com/sapcc/limes/internal/test" + "github.com/sapcc/limes/internal/test/plugins" ) const day = 24 * time.Hour @@ -48,9 +50,7 @@ const testCommitmentsYAML = ` commitment_durations: ["1 hour", "2 hours"] commitment_min_confirm_date: '1970-01-08T00:00:00Z' # one week after start of mock.Clock - resource: first/things - commitment_is_az_aware: false - resource: first/capacity 
- commitment_is_az_aware: true ` const testCommitmentsYAMLWithoutMinConfirmDate = ` availability_zones: [ az-one, az-two ] @@ -65,12 +65,6 @@ const testCommitmentsYAMLWithoutMinConfirmDate = ` # the resources in "first" have commitments, the ones in "second" do not - resource: second/.* commitment_durations: ["1 hour", "2 hours", "3 hours"] - - resource: second/things - commitment_is_az_aware: false - - resource: second/capacity - commitment_is_az_aware: true - - resource: second/capacity_portion - commitment_is_az_aware: true ` const testConvertCommitmentsYAML = ` @@ -95,10 +89,8 @@ const testConvertCommitmentsYAML = ` - resource: third/.* commitment_durations: ["1 hour", "2 hours"] - resource: first/capacity - commitment_is_az_aware: true commitment_conversion: {identifier: flavor1, weight: 48} - resource: second/capacity - commitment_is_az_aware: true commitment_conversion: {identifier: flavor1, weight: 32} - resource: third/capacity_c32 commitment_conversion: {identifier: flavor1, weight: 32} @@ -118,6 +110,10 @@ func TestCommitmentLifecycleWithDelayedConfirmation(t *testing.T) { test.WithConfig(testCommitmentsYAML), test.WithAPIHandler(NewV1API), ) + plugin := s.Cluster.QuotaPlugins["first"].(*plugins.GenericQuotaPlugin) + plugin2 := s.Cluster.QuotaPlugins["second"].(*plugins.GenericQuotaPlugin) + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZAwareResourceTopology}, "things": {Topology: liquid.FlatResourceTopology}} + plugin2.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZAwareResourceTopology}, "things": {Topology: liquid.FlatResourceTopology}} // GET returns an empty list if there are no commitments assert.HTTPRequest{ @@ -477,6 +473,9 @@ func TestPutCommitmentErrorCases(t *testing.T) { test.WithAPIHandler(NewV1API), ) + plugin := s.Cluster.QuotaPlugins["first"].(*plugins.GenericQuotaPlugin) + plugin.LiquidServiceInfo.Resources = 
map[liquid.ResourceName]liquid.ResourceInfo{"things": {Topology: liquid.FlatResourceTopology}} + request := assert.JSONObject{ "service_type": "first", "resource_name": "capacity", diff --git a/internal/collector/capacity_scrape.go b/internal/collector/capacity_scrape.go index be47f3250..ff9f0347e 100644 --- a/internal/collector/capacity_scrape.go +++ b/internal/collector/capacity_scrape.go @@ -361,6 +361,7 @@ func (c *Collector) processCapacityScrapeTask(ctx context.Context, task capacity func (c *Collector) confirmPendingCommitmentsIfNecessary(serviceType db.ServiceType, resourceName liquid.ResourceName) error { behavior := c.Cluster.BehaviorForResource(serviceType, resourceName) + resInfo := c.Cluster.InfoForResource(serviceType, resourceName) now := c.MeasureTime() // do not run ConfirmPendingCommitments if commitments are not enabled (or not live yet) for this resource @@ -378,7 +379,7 @@ func (c *Collector) confirmPendingCommitmentsIfNecessary(serviceType db.ServiceT defer sqlext.RollbackUnlessCommitted(tx) committableAZs := c.Cluster.Config.AvailabilityZones - if !behavior.CommitmentIsAZAware { + if resInfo.Topology == liquid.FlatResourceTopology { committableAZs = []liquid.AvailabilityZone{liquid.AvailabilityZoneAny} } for _, az := range committableAZs { diff --git a/internal/collector/capacity_scrape_test.go b/internal/collector/capacity_scrape_test.go index bb4f3428f..e1eb0caeb 100644 --- a/internal/collector/capacity_scrape_test.go +++ b/internal/collector/capacity_scrape_test.go @@ -126,7 +126,7 @@ const ( - second/things resource_behavior: # enable commitments for the */capacity resources - - { resource: '.*/capacity', commitment_durations: [ '1 hour', '10 days' ], commitment_is_az_aware: true } + - { resource: '.*/capacity', commitment_durations: [ '1 hour', '10 days' ] } # test that overcommit factor is considered when confirming commitments - { resource: first/capacity, overcommit_factor: 10.0 } quota_distribution_configs: diff --git 
a/internal/collector/commitment_cleanup_test.go b/internal/collector/commitment_cleanup_test.go index cdd102405..20f247297 100644 --- a/internal/collector/commitment_cleanup_test.go +++ b/internal/collector/commitment_cleanup_test.go @@ -41,7 +41,7 @@ const ( type: --test-generic resource_behavior: # enable commitments for the */capacity resources - - { resource: '.*/capacity', commitment_durations: [ '1 day', '3 years' ], commitment_is_az_aware: true } + - { resource: '.*/capacity', commitment_durations: [ '1 day', '3 years' ] } ` ) diff --git a/internal/core/resource_behavior.go b/internal/core/resource_behavior.go index ecd738688..6b63f8b7c 100644 --- a/internal/core/resource_behavior.go +++ b/internal/core/resource_behavior.go @@ -37,7 +37,6 @@ type ResourceBehavior struct { FullResourceNameRx regexpext.BoundedRegexp `yaml:"resource"` OvercommitFactor liquid.OvercommitFactor `yaml:"overcommit_factor"` CommitmentDurations []limesresources.CommitmentDuration `yaml:"commitment_durations"` - CommitmentIsAZAware bool `yaml:"commitment_is_az_aware"` CommitmentMinConfirmDate *time.Time `yaml:"commitment_min_confirm_date"` CommitmentUntilPercent *float64 `yaml:"commitment_until_percent"` CommitmentConversion CommitmentConversion `yaml:"commitment_conversion"` @@ -100,9 +99,6 @@ func (b *ResourceBehavior) Merge(other ResourceBehavior, fullResourceName string b.CommitmentMinConfirmDate = other.CommitmentMinConfirmDate } } - if other.CommitmentIsAZAware { - b.CommitmentIsAZAware = true - } if other.CommitmentUntilPercent != nil { if b.CommitmentUntilPercent == nil || *b.CommitmentUntilPercent > *other.CommitmentUntilPercent { b.CommitmentUntilPercent = other.CommitmentUntilPercent From 8f5f123ee3d175f8820030bbb7c4cddb98ed3b4a Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 14:11:30 +0100 Subject: [PATCH 15/42] apply basequota to all available AZs if topology is AZseparated --- internal/collector/scrape.go | 11 ++- internal/collector/scrape_test.go | 66 
++++++++-------- .../datamodel/apply_computed_project_quota.go | 18 +++-- .../apply_computed_project_quota_test.go | 75 ++++++++++++++++++- 4 files changed, 126 insertions(+), 44 deletions(-) diff --git a/internal/collector/scrape.go b/internal/collector/scrape.go index 1763fbb5c..6abb2ff05 100644 --- a/internal/collector/scrape.go +++ b/internal/collector/scrape.go @@ -208,11 +208,16 @@ func (c *Collector) writeResourceScrapeResult(dbDomain db.Domain, dbProject db.P srv := task.Service for resName, resData := range resourceData { + resInfo := c.Cluster.InfoForResource(task.Service.Type, resName) if len(resData.UsageData) == 0 { // ensure that there is at least one ProjectAZResource for each ProjectResource resData.UsageData = core.InAnyAZ(core.UsageData{Usage: 0}) resourceData[resName] = resData } else { + // AZ separated resources will not include "any" AZ. The basequota will be distributed towards the existing AZs. + if resInfo.Topology == liquid.AZSeparatedResourceTopology { + continue + } // for AZ-aware resources, ensure that we also have a ProjectAZResource in // "any", because ApplyComputedProjectQuota needs somewhere to write base // quotas into if enabled @@ -303,12 +308,10 @@ func (c *Collector) writeResourceScrapeResult(dbDomain db.Domain, dbProject db.P azRes.Usage = data.Usage azRes.PhysicalUsage = data.PhysicalUsage - // set AZ backend quota. Do not set backend quota to the automatically created any AZ. + // set AZ backend quota. 
resInfo := c.Cluster.InfoForResource(srv.Type, res.Name) if resInfo.Topology == liquid.AZSeparatedResourceTopology && resInfo.HasQuota { - if azRes.AvailabilityZone != liquid.AvailabilityZoneAny { - azRes.BackendQuota = &data.Quota - } + azRes.BackendQuota = &data.Quota } // reset backendQuota entries for topology changes if resInfo.Topology != liquid.AZSeparatedResourceTopology { diff --git a/internal/collector/scrape_test.go b/internal/collector/scrape_test.go index 726fa5117..30dacd231 100644 --- a/internal/collector/scrape_test.go +++ b/internal/collector/scrape_test.go @@ -609,24 +609,20 @@ func Test_TopologyScrapes(t *testing.T) { scrapedAt1 := s.Clock.Now().Add(-5 * time.Second) scrapedAt2 := s.Clock.Now() tr.DBChanges().AssertEqualf(` - INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (1, 1, 'any', 0, '{"t":[%[1]d],"v":[0]}'); - INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (10, 4, 'any', 0, '{"t":[%[3]d],"v":[0]}'); - INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (11, 4, 'az-one', 0, 0, '{"t":[%[3]d],"v":[0]}', 50); - INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (12, 4, 'az-two', 0, 0, '{"t":[%[3]d],"v":[0]}', 50); - INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (13, 5, 'any', 0, '{"t":[%[3]d],"v":[0]}'); - INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (14, 5, 'az-one', 0, '{"t":[%[3]d],"v":[0]}'); - INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (15, 5, 'az-two', 0, '{"t":[%[3]d],"v":[0]}'); - INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (16, 6, 'any', 0, '{"t":[%[3]d],"v":[0]}'); - INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, 
backend_quota) VALUES (17, 6, 'az-one', 2, '[{"index":0},{"index":1}]', '{"t":[%[3]d],"v":[2]}', 21); - INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (18, 6, 'az-two', 2, '[{"index":2},{"index":3}]', '{"t":[%[3]d],"v":[2]}', 21); - INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (2, 1, 'az-one', 0, 0, '{"t":[%[1]d],"v":[0]}', 50); - INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (3, 1, 'az-two', 0, 0, '{"t":[%[1]d],"v":[0]}', 50); - INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (4, 2, 'any', 0, '{"t":[%[1]d],"v":[0]}'); - INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (5, 2, 'az-one', 0, '{"t":[%[1]d],"v":[0]}'); - INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (6, 2, 'az-two', 0, '{"t":[%[1]d],"v":[0]}'); - INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (7, 3, 'any', 0, '{"t":[%[1]d],"v":[0]}'); - INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (8, 3, 'az-one', 2, '[{"index":0},{"index":1}]', '{"t":[%[1]d],"v":[2]}', 21); - INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (9, 3, 'az-two', 2, '[{"index":2},{"index":3}]', '{"t":[%[1]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (1, 1, 'az-one', 0, 0, '{"t":[%[1]d],"v":[0]}', 50); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (10, 5, 'any', 0, '{"t":[%[3]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (11, 5, 'az-one', 0, 
'{"t":[%[3]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (12, 5, 'az-two', 0, '{"t":[%[3]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (13, 6, 'az-one', 2, '[{"index":0},{"index":1}]', '{"t":[%[3]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (14, 6, 'az-two', 2, '[{"index":2},{"index":3}]', '{"t":[%[3]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (2, 1, 'az-two', 0, 0, '{"t":[%[1]d],"v":[0]}', 50); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (3, 2, 'any', 0, '{"t":[%[1]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (4, 2, 'az-one', 0, '{"t":[%[1]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (5, 2, 'az-two', 0, '{"t":[%[1]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (6, 3, 'az-one', 2, '[{"index":0},{"index":1}]', '{"t":[%[1]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (7, 3, 'az-two', 2, '[{"index":2},{"index":3}]', '{"t":[%[1]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (8, 4, 'az-one', 0, 0, '{"t":[%[3]d],"v":[0]}', 50); + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (9, 4, 'az-two', 0, 0, '{"t":[%[3]d],"v":[0]}', 50); INSERT INTO project_resources (id, service_id, name, quota, backend_quota) VALUES (1, 1, 'capacity', 0, 100); INSERT INTO 
project_resources (id, service_id, name) VALUES (2, 1, 'capacity_portion'); INSERT INTO project_resources (id, service_id, name, quota, backend_quota) VALUES (3, 1, 'things', 0, 42); @@ -665,14 +661,14 @@ func Test_TopologyScrapes(t *testing.T) { mustT(t, syncJob.ProcessOne(s.Ctx, withLabel)) tr.DBChanges().AssertEqualf(` - UPDATE project_az_resources SET backend_quota = 20 WHERE id = 11 AND resource_id = 4 AND az = 'az-one'; - UPDATE project_az_resources SET backend_quota = 20 WHERE id = 12 AND resource_id = 4 AND az = 'az-two'; - UPDATE project_az_resources SET backend_quota = 13 WHERE id = 17 AND resource_id = 6 AND az = 'az-one'; - UPDATE project_az_resources SET backend_quota = 13 WHERE id = 18 AND resource_id = 6 AND az = 'az-two'; - UPDATE project_az_resources SET backend_quota = 20 WHERE id = 2 AND resource_id = 1 AND az = 'az-one'; - UPDATE project_az_resources SET backend_quota = 20 WHERE id = 3 AND resource_id = 1 AND az = 'az-two'; - UPDATE project_az_resources SET backend_quota = 13 WHERE id = 8 AND resource_id = 3 AND az = 'az-one'; - UPDATE project_az_resources SET backend_quota = 13 WHERE id = 9 AND resource_id = 3 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 1 AND resource_id = 1 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 13 AND resource_id = 6 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 14 AND resource_id = 6 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 2 AND resource_id = 1 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 6 AND resource_id = 3 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 7 AND resource_id = 3 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 8 AND resource_id = 4 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 9 AND resource_id = 4 AND az = 
'az-two'; UPDATE project_resources SET backend_quota = 20 WHERE id = 1 AND service_id = 1 AND name = 'capacity'; UPDATE project_resources SET backend_quota = 13 WHERE id = 3 AND service_id = 1 AND name = 'things'; UPDATE project_resources SET backend_quota = 20 WHERE id = 4 AND service_id = 2 AND name = 'capacity'; @@ -689,14 +685,16 @@ func Test_TopologyScrapes(t *testing.T) { mustT(t, job.ProcessOne(s.Ctx, withLabel)) tr.DBChanges().AssertEqualf(` - UPDATE project_az_resources SET backend_quota = 50 WHERE id = 11 AND resource_id = 4 AND az = 'az-one'; - UPDATE project_az_resources SET backend_quota = 50 WHERE id = 12 AND resource_id = 4 AND az = 'az-two'; - UPDATE project_az_resources SET backend_quota = NULL WHERE id = 17 AND resource_id = 6 AND az = 'az-one'; - UPDATE project_az_resources SET backend_quota = NULL WHERE id = 18 AND resource_id = 6 AND az = 'az-two'; - UPDATE project_az_resources SET backend_quota = 50 WHERE id = 2 AND resource_id = 1 AND az = 'az-one'; - UPDATE project_az_resources SET backend_quota = 50 WHERE id = 3 AND resource_id = 1 AND az = 'az-two'; - UPDATE project_az_resources SET backend_quota = NULL WHERE id = 8 AND resource_id = 3 AND az = 'az-one'; - UPDATE project_az_resources SET backend_quota = NULL WHERE id = 9 AND resource_id = 3 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 1 AND resource_id = 1 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = NULL WHERE id = 13 AND resource_id = 6 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = NULL WHERE id = 14 AND resource_id = 6 AND az = 'az-two'; + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (15, 3, 'any', 0, '{"t":[1825],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (16, 6, 'any', 0, '{"t":[1830],"v":[0]}'); + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 2 AND resource_id = 1 AND az = 'az-two'; + 
UPDATE project_az_resources SET backend_quota = NULL WHERE id = 6 AND resource_id = 3 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = NULL WHERE id = 7 AND resource_id = 3 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 8 AND resource_id = 4 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 9 AND resource_id = 4 AND az = 'az-two'; UPDATE project_services SET scraped_at = 1825, checked_at = 1825, next_scrape_at = 3625 WHERE id = 1 AND project_id = 1 AND type = 'unittest'; UPDATE project_services SET scraped_at = 1830, checked_at = 1830, next_scrape_at = 3630 WHERE id = 2 AND project_id = 2 AND type = 'unittest'; `) diff --git a/internal/datamodel/apply_computed_project_quota.go b/internal/datamodel/apply_computed_project_quota.go index da2c8ccf0..df5d276a2 100644 --- a/internal/datamodel/apply_computed_project_quota.go +++ b/internal/datamodel/apply_computed_project_quota.go @@ -151,7 +151,8 @@ func ApplyComputedProjectQuota(serviceType db.ServiceType, resourceName liquid.R } // evaluate QD algorithm - target, allowsQuotaOvercommit := acpqComputeQuotas(stats, cfg, constraints) + // AZ separated basequota will be assigned to all available AZs + target, allowsQuotaOvercommit := acpqComputeQuotas(stats, cfg, constraints, resInfo) if logg.ShowDebug { // NOTE: The structs that contain pointers must be printed as JSON to actually show all values. logg.Debug("ACPQ for %s/%s: stats = %#v", serviceType, resourceName, stats) @@ -286,7 +287,7 @@ type acpqGlobalTarget map[limes.AvailabilityZone]acpqAZTarget // effects (reading the DB, writing the DB, setting quota in the backend). // This function is separate because most test cases work on this level. // The full ApplyComputedProjectQuota() function is tested during capacity scraping. 
-func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints) (target acpqGlobalTarget, allowsQuotaOvercommit map[limes.AvailabilityZone]bool) { +func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints, resInfo liquid.ResourceInfo) (target acpqGlobalTarget, allowsQuotaOvercommit map[limes.AvailabilityZone]bool) { // enumerate which project resource IDs and AZs are relevant // ("Relevant" AZs are all that have allocation stats available.) isProjectResourceID := make(map[db.ProjectResourceID]struct{}) @@ -300,7 +301,7 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats } } slices.Sort(allAZsInOrder) - if cfg.ProjectBaseQuota > 0 { + if cfg.ProjectBaseQuota > 0 && resInfo.Topology != liquid.AZSeparatedResourceTopology { // base quota is given out in the pseudo-AZ "any", so we need to calculate quota for "any", too isRelevantAZ[limes.AvailabilityZoneAny] = struct{}{} } @@ -321,7 +322,7 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats // in AZ-aware resources, quota for the pseudo-AZ "any" is backed by capacity // in all the real AZs, so it can only allow quota overcommit if all AZs do - if isAZAware { + if isAZAware && resInfo.Topology != liquid.AZSeparatedResourceTopology { allowsQuotaOvercommit[limes.AvailabilityZoneAny] = allRealAZsAllowQuotaOvercommit } @@ -374,7 +375,14 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats } } if sumOfLocalizedQuotas < cfg.ProjectBaseQuota { - target[limes.AvailabilityZoneAny][resourceID].Desired = cfg.ProjectBaseQuota - sumOfLocalizedQuotas + // AZ separated topology receives the basequota to all available AZs + if resInfo.Topology ==
liquid.AZSeparatedResourceTopology { + for az := range isRelevantAZ { + target[az][resourceID].Desired = cfg.ProjectBaseQuota - target[az][resourceID].Allocated + } + } else { + target[limes.AvailabilityZoneAny][resourceID].Desired = cfg.ProjectBaseQuota - sumOfLocalizedQuotas + } } } if !slices.Contains(allAZsInOrder, limes.AvailabilityZoneAny) { diff --git a/internal/datamodel/apply_computed_project_quota_test.go b/internal/datamodel/apply_computed_project_quota_test.go index ef48d17e6..3f922cdbd 100644 --- a/internal/datamodel/apply_computed_project_quota_test.go +++ b/internal/datamodel/apply_computed_project_quota_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/sapcc/go-api-declarations/limes" + "github.com/sapcc/go-api-declarations/liquid" "github.com/sapcc/limes/internal/core" "github.com/sapcc/limes/internal/db" @@ -160,6 +161,68 @@ func TestACPQBasicWithAZAwareness(t *testing.T) { } } +func TestACPQBasicWithAZSeparated(t *testing.T) { + input := map[limes.AvailabilityZone]clusterAZAllocationStats{ + "az-one": { + Capacity: 200, + ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{ + // 401 and 402 are boring base cases with usage only in one AZ or both AZs, respectively + 401: constantUsage(20), + 402: constantUsage(20), + // 403 tests how growth multiplier follows historical usage + 403: {Usage: 30, MinHistoricalUsage: 28, MaxHistoricalUsage: 30}, + // 404 tests how historical usage limits quota shrinking + 404: {Usage: 5, MinHistoricalUsage: 5, MaxHistoricalUsage: 20}, + // 405 tests how commitment guarantees quota even with low usage, + // and also that usage in one AZ does not reflect commitments in another + 405: {Committed: 60, Usage: 10, MinHistoricalUsage: 8, MaxHistoricalUsage: 12}, + // 406 and 407 test the application of base quota in "any" + 406: constantUsage(0), + 407: constantUsage(2), + }, + }, + "az-two": { + Capacity: 200, + ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{ + 401: constantUsage(20), + 
402: constantUsage(0), + 403: {Usage: 20, MinHistoricalUsage: 19, MaxHistoricalUsage: 20}, + 404: {Usage: 0, MinHistoricalUsage: 0, MaxHistoricalUsage: 15}, + 405: constantUsage(40), + 406: constantUsage(0), + 407: constantUsage(1), + }, + }, + } + cfg := core.AutogrowQuotaDistributionConfiguration{ + GrowthMultiplier: 1.2, + ProjectBaseQuota: 10, + } + + for _, cfg.AllowQuotaOvercommitUntilAllocatedPercent = range []float64{0, 10000} { + expectACPQResultForAZSeparated(t, input, cfg, nil, acpqGlobalTarget{ + "az-one": { + 401: {Allocated: 24}, + 402: {Allocated: 24}, + 403: {Allocated: 33}, // 28 * 1.2 = 33.6 + 404: {Allocated: 20}, + 405: {Allocated: 72}, // 60 * 1.2 = 72 + 406: {Allocated: 10}, // Basequota + 407: {Allocated: 7}, // 2 * 1.2 = 2.4 rounded to 3 (guaranteed minimum growth) -> Basequota: 10 - 3 = 7 + }, + "az-two": { + 401: {Allocated: 24}, + 402: {Allocated: 0}, + 403: {Allocated: 22}, // 19 * 1.2 = 22.8 + 404: {Allocated: 15}, + 405: {Allocated: 48}, // 40 * 1.2 = 48 + 406: {Allocated: 10}, // Basequota + 407: {Allocated: 8}, // 1 * 1.2 = 1.2 rounded to 2 (guaranteed minimum growth) -> Basequota: 10 - 2 = 8 + }, + }) + } +} + func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { // This test case checks the priority of capacity allocation.
// All stages use the same basic scenario, except that capacity will be @@ -547,9 +610,19 @@ func withCommitted(committed uint64, stats projectAZAllocationStats) projectAZAl return stats } +func expectACPQResultForAZSeparated(t *testing.T, input map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints, expected acpqGlobalTarget) { + t.Helper() + actual, _ := acpqComputeQuotas(input, cfg, constraints, liquid.ResourceInfo{Topology: liquid.AZSeparatedResourceTopology}) + createACPQResult(t, input, cfg, actual, expected) +} + func expectACPQResult(t *testing.T, input map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints, expected acpqGlobalTarget) { t.Helper() - actual, _ := acpqComputeQuotas(input, cfg, constraints) + actual, _ := acpqComputeQuotas(input, cfg, constraints, liquid.ResourceInfo{}) + createACPQResult(t, input, cfg, actual, expected) +} + +func createACPQResult(t *testing.T, input map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, actual, expected acpqGlobalTarget) { // normalize away any left-over intermediate values for _, azTarget := range actual { for _, projectTarget := range azTarget { From 95e45d56d6fcc117d1d3c0d581128af3306598d6 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 15:43:51 +0100 Subject: [PATCH 16/42] commitment: remove empty behaviors. 
rearrange FlatResourceTopology condition --- internal/api/commitment.go | 10 +++++----- internal/api/commitment_test.go | 2 -- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/internal/api/commitment.go b/internal/api/commitment.go index ff60da203..dc9e1b255 100644 --- a/internal/api/commitment.go +++ b/internal/api/commitment.go @@ -240,14 +240,14 @@ func (p *v1Provider) parseAndValidateCommitmentRequest(w http.ResponseWriter, r http.Error(w, "commitments are not enabled for this resource", http.StatusUnprocessableEntity) return nil, nil, nil } - if resInfo.Topology != liquid.FlatResourceTopology { - if !slices.Contains(p.Cluster.Config.AvailabilityZones, req.AvailabilityZone) { - http.Error(w, "no such availability zone", http.StatusUnprocessableEntity) + if resInfo.Topology == liquid.FlatResourceTopology { + if req.AvailabilityZone != limes.AvailabilityZoneAny { + http.Error(w, `resource does not accept AZ-aware commitments, so the AZ must be set to "any"`, http.StatusUnprocessableEntity) return nil, nil, nil } } else { - if req.AvailabilityZone != limes.AvailabilityZoneAny { - http.Error(w, `resource does not accept AZ-aware commitments, so the AZ must be set to "any"`, http.StatusUnprocessableEntity) + if !slices.Contains(p.Cluster.Config.AvailabilityZones, req.AvailabilityZone) { + http.Error(w, "no such availability zone", http.StatusUnprocessableEntity) return nil, nil, nil } } diff --git a/internal/api/commitment_test.go b/internal/api/commitment_test.go index acc95c768..0ffa194db 100644 --- a/internal/api/commitment_test.go +++ b/internal/api/commitment_test.go @@ -49,8 +49,6 @@ const testCommitmentsYAML = ` - resource: first/.* commitment_durations: ["1 hour", "2 hours"] commitment_min_confirm_date: '1970-01-08T00:00:00Z' # one week after start of mock.Clock - - resource: first/things - - resource: first/capacity ` const testCommitmentsYAMLWithoutMinConfirmDate = ` availability_zones: [ az-one, az-two ] From
a5e846c8c637ed1100fec4d90e0ff56e81ac53b1 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 15:54:17 +0100 Subject: [PATCH 17/42] scrape: fix error collector --- internal/plugins/capacity_liquid.go | 8 ++++---- internal/plugins/liquid.go | 8 ++++---- internal/test/plugins/quota_generic.go | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/internal/plugins/capacity_liquid.go b/internal/plugins/capacity_liquid.go index 79c04f354..ebe10fa7f 100644 --- a/internal/plugins/capacity_liquid.go +++ b/internal/plugins/capacity_liquid.go @@ -95,17 +95,17 @@ func (p *liquidCapacityPlugin) Scrape(ctx context.Context, backchannel core.Capa p.LiquidServiceType, p.LiquidServiceInfo.Version, resp.InfoVersion) } resourceNames := SortMapKeys(p.LiquidServiceInfo.Resources) + var errs []error for _, resourceName := range resourceNames { - errs := []error{} perAZ := resp.Resources[liquid.ResourceName(resourceName)].PerAZ toplogy := p.LiquidServiceInfo.Resources[liquid.ResourceName(resourceName)].Topology err := MatchLiquidReportToTopology(perAZ, toplogy) if err != nil { errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err)) } - if len(errs) > 0 { - return nil, nil, errors.Join(errs...) - } + } + if len(errs) > 0 { + return nil, nil, errors.Join(errs...) 
} resultInService := make(map[liquid.ResourceName]core.PerAZ[core.CapacityData], len(p.LiquidServiceInfo.Resources)) diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index 4b9b6a9d8..44fc1db38 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -124,17 +124,17 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro p.LiquidServiceType, p.LiquidServiceInfo.Version, resp.InfoVersion) } resourceNames := SortMapKeys(p.LiquidServiceInfo.Resources) + var errs []error for _, resourceName := range resourceNames { - errs := []error{} perAZ := resp.Resources[liquid.ResourceName(resourceName)].PerAZ toplogy := p.LiquidServiceInfo.Resources[liquid.ResourceName(resourceName)].Topology err := MatchLiquidReportToTopology(perAZ, toplogy) if err != nil { errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err)) } - if len(errs) > 0 { - return nil, nil, errors.Join(errs...) - } + } + if len(errs) > 0 { + return nil, nil, errors.Join(errs...) } result = make(map[liquid.ResourceName]core.ResourceData, len(p.LiquidServiceInfo.Resources)) diff --git a/internal/test/plugins/quota_generic.go b/internal/test/plugins/quota_generic.go index 4a37444f0..ee3f9d809 100644 --- a/internal/test/plugins/quota_generic.go +++ b/internal/test/plugins/quota_generic.go @@ -171,7 +171,7 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr } if len(p.ReportedAZs) > 0 { - errs := []error{} + var errs []error resourceNames := plugins.SortMapKeys(p.LiquidServiceInfo.Resources) for _, resourceName := range resourceNames { toplogy := p.LiquidServiceInfo.Resources[liquid.ResourceName(resourceName)].Topology From d1b55601bd036845b7322047085fb151186ae273 Mon Sep 17 00:00:00 2001 From: Stefan Voigt <52136954+VoigtS@users.noreply.github.com> Date: Thu, 5 Dec 2024 15:59:08 +0100 Subject: [PATCH 18/42] Update internal/plugins/utils.go fix error slice declaration. 
Co-authored-by: Stefan Majewsky --- internal/plugins/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index aff8d67b7..b1de3b657 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -42,7 +42,7 @@ func SortMapKeys[M map[K]V, K ~string, V any](mapToSort M) []string { } func CheckResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { - errs := []error{} + var errs []error resources := serviceInfo.Resources resourceNames := SortMapKeys(resources) From 697f0e970cef1b023e4df8db72f2932ea2955152 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 16:20:55 +0100 Subject: [PATCH 19/42] Fix map key sort --- internal/plugins/capacity_liquid.go | 4 ++-- internal/plugins/liquid.go | 4 ++-- internal/plugins/utils.go | 13 +++++-------- internal/test/plugins/quota_generic.go | 2 +- 4 files changed, 10 insertions(+), 13 deletions(-) diff --git a/internal/plugins/capacity_liquid.go b/internal/plugins/capacity_liquid.go index ebe10fa7f..50f3d4552 100644 --- a/internal/plugins/capacity_liquid.go +++ b/internal/plugins/capacity_liquid.go @@ -97,8 +97,8 @@ func (p *liquidCapacityPlugin) Scrape(ctx context.Context, backchannel core.Capa resourceNames := SortMapKeys(p.LiquidServiceInfo.Resources) var errs []error for _, resourceName := range resourceNames { - perAZ := resp.Resources[liquid.ResourceName(resourceName)].PerAZ - toplogy := p.LiquidServiceInfo.Resources[liquid.ResourceName(resourceName)].Topology + perAZ := resp.Resources[resourceName].PerAZ + toplogy := p.LiquidServiceInfo.Resources[resourceName].Topology err := MatchLiquidReportToTopology(perAZ, toplogy) if err != nil { errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err)) diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index 44fc1db38..2a66d0956 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -126,8 +126,8 @@ func 
(p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro resourceNames := SortMapKeys(p.LiquidServiceInfo.Resources) var errs []error for _, resourceName := range resourceNames { - perAZ := resp.Resources[liquid.ResourceName(resourceName)].PerAZ - toplogy := p.LiquidServiceInfo.Resources[liquid.ResourceName(resourceName)].Topology + perAZ := resp.Resources[resourceName].PerAZ + toplogy := p.LiquidServiceInfo.Resources[resourceName].Topology err := MatchLiquidReportToTopology(perAZ, toplogy) if err != nil { errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err)) diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index b1de3b657..cda03b56d 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -22,7 +22,8 @@ package plugins import ( "errors" "fmt" - "sort" + "maps" + "slices" "github.com/sapcc/go-api-declarations/liquid" ) @@ -31,13 +32,9 @@ func p2u64(val uint64) *uint64 { return &val } -func SortMapKeys[M map[K]V, K ~string, V any](mapToSort M) []string { - var sortedKeys []string - for key := range mapToSort { - sortedKeys = append(sortedKeys, string(key)) - } - sort.Strings(sortedKeys) - +func SortMapKeys[M map[K]V, K ~string, V any](mapToSort M) []K { + sortedKeys := slices.Collect(maps.Keys(mapToSort)) + slices.Sort(sortedKeys) return sortedKeys } diff --git a/internal/test/plugins/quota_generic.go b/internal/test/plugins/quota_generic.go index ee3f9d809..c083feb86 100644 --- a/internal/test/plugins/quota_generic.go +++ b/internal/test/plugins/quota_generic.go @@ -174,7 +174,7 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr var errs []error resourceNames := plugins.SortMapKeys(p.LiquidServiceInfo.Resources) for _, resourceName := range resourceNames { - toplogy := p.LiquidServiceInfo.Resources[liquid.ResourceName(resourceName)].Topology + toplogy := p.LiquidServiceInfo.Resources[resourceName].Topology err := 
plugins.MatchLiquidReportToTopology(p.ReportedAZs, toplogy) if err != nil { errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err)) From d48bb643615652391c740a9511969d55bbea32ff Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 16:23:58 +0100 Subject: [PATCH 20/42] fix remaining resourceName type conversion --- internal/plugins/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index cda03b56d..b3304614d 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -44,7 +44,7 @@ func CheckResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { resourceNames := SortMapKeys(resources) for _, resourceName := range resourceNames { - topology := resources[liquid.ResourceName(resourceName)].Topology + topology := resources[resourceName].Topology if topology == "" { continue } From 9440dc84f2716148f47f991063a220f64ea7b6ee Mon Sep 17 00:00:00 2001 From: Stefan Voigt <52136954+VoigtS@users.noreply.github.com> Date: Thu, 5 Dec 2024 16:25:07 +0100 Subject: [PATCH 21/42] Update internal/plugins/utils.go return a non-fatal error when topology is not set. 
Co-authored-by: Stefan Majewsky --- internal/plugins/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index b3304614d..96682147a 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -46,7 +46,7 @@ func CheckResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { for _, resourceName := range resourceNames { topology := resources[resourceName].Topology if topology == "" { - continue + logg.Error("missing topology on resource: %s", resourceName) } if !topology.IsValid() { errs = append(errs, fmt.Errorf("invalid toplogy: %s on resource: %s", topology, resourceName)) From 3d31bc91e8815390a4fd3375e704b7bfaf09ebe0 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 16:27:18 +0100 Subject: [PATCH 22/42] empty topology will now log a missing topology --- internal/plugins/utils.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index 96682147a..4c9fe8e72 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -26,6 +26,7 @@ import ( "slices" "github.com/sapcc/go-api-declarations/liquid" + "github.com/sapcc/go-bits/logg" ) func p2u64(val uint64) *uint64 { From 5593a611dff099831c00df5cc8ae2e165a12dcaf Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 16:31:26 +0100 Subject: [PATCH 23/42] fix all topology misspellings --- internal/collector/scrape_test.go | 16 ++++++++-------- .../datamodel/apply_computed_project_quota.go | 2 +- internal/plugins/capacity_liquid.go | 4 ++-- internal/plugins/liquid.go | 4 ++-- internal/plugins/utils.go | 4 ++-- internal/test/plugins/quota_generic.go | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/internal/collector/scrape_test.go b/internal/collector/scrape_test.go index 30dacd231..3fa28926b 100644 --- a/internal/collector/scrape_test.go +++ b/internal/collector/scrape_test.go @@ -679,7 +679,7 @@ func Test_TopologyScrapes(t 
*testing.T) { s.Clock.StepBy(scrapeInterval) - // toplogy of a resource changes. Reset AZ-separated backend_quota + // topology of a resource changes. Reset AZ-separated backend_quota plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZAwareResourceTopology}} mustT(t, job.ProcessOne(s.Ctx, withLabel)) mustT(t, job.ProcessOne(s.Ctx, withLabel)) @@ -704,26 +704,26 @@ func Test_TopologyScrapes(t *testing.T) { // negative: scrape with flat topology returns invalid AZs plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.FlatResourceTopology}} plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"az-one": status, "az-two": status} - mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with toplogy type: flat returned AZs: [az-one az-two]")) + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with topology type: flat returned AZs: [az-one az-two]")) - // negative: scrape with az-aware toplogy returns invalid any AZ + // negative: scrape with az-aware topology returns invalid any AZ plugin.LiquidServiceInfo.Resources["capacity"] = liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology} plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"any": status} - mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: service: unittest, resource: capacity: scrape with toplogy type: az-aware returned AZs: [any]")) + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: service: unittest, resource: capacity: scrape with topology type: az-aware returned AZs: [any]")) 
s.Clock.StepBy(scrapeInterval) - // negative: scrape with az-separated toplogy returns invalid AZs any and unknown + // negative: scrape with az-separated topology returns invalid AZs any and unknown plugin.LiquidServiceInfo.Resources["capacity"] = liquid.ResourceInfo{Topology: liquid.AZSeparatedResourceTopology} plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"az-one": status, "unknown": status} - mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with toplogy type: az-separated returned AZs: [az-one unknown]")) + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with topology type: az-separated returned AZs: [az-one unknown]")) // negative: reject liquid initialization with invalid topologies plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: "invalidAZ1"}, "things": {Topology: "invalidAZ2"}} - mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: invalid toplogy: invalidAZ1 on resource: capacity\ninvalid toplogy: invalidAZ2 on resource: things")) + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: invalid topology: invalidAZ1 on resource: capacity\ninvalid topology: invalidAZ2 on resource: things")) s.Clock.StepBy(scrapeInterval) // negative: multiple resources with mismatching topology to AZ responses plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZSeparatedResourceTopology}} plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"unknown": status} - mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: 
service: unittest, resource: capacity: scrape with toplogy type: az-separated returned AZs: [unknown]\nservice: unittest, resource: things: scrape with toplogy type: az-separated returned AZs: [unknown]")) + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with topology type: az-separated returned AZs: [unknown]\nservice: unittest, resource: things: scrape with topology type: az-separated returned AZs: [unknown]")) } diff --git a/internal/datamodel/apply_computed_project_quota.go b/internal/datamodel/apply_computed_project_quota.go index df5d276a2..425112ceb 100644 --- a/internal/datamodel/apply_computed_project_quota.go +++ b/internal/datamodel/apply_computed_project_quota.go @@ -375,7 +375,7 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats } } if sumOfLocalizedQuotas < cfg.ProjectBaseQuota { - // AZ separated toplogy receives the basequota to all available AZs + // AZ separated topology receives the basequota to all available AZs if resInfo.Topology == liquid.AZSeparatedResourceTopology { for az := range isRelevantAZ { target[az][resourceID].Desired = cfg.ProjectBaseQuota - target[az][resourceID].Allocated diff --git a/internal/plugins/capacity_liquid.go b/internal/plugins/capacity_liquid.go index 50f3d4552..7f1278fbb 100644 --- a/internal/plugins/capacity_liquid.go +++ b/internal/plugins/capacity_liquid.go @@ -98,8 +98,8 @@ func (p *liquidCapacityPlugin) Scrape(ctx context.Context, backchannel core.Capa var errs []error for _, resourceName := range resourceNames { perAZ := resp.Resources[resourceName].PerAZ - toplogy := p.LiquidServiceInfo.Resources[resourceName].Topology - err := MatchLiquidReportToTopology(perAZ, toplogy) + topology := p.LiquidServiceInfo.Resources[resourceName].Topology + err := MatchLiquidReportToTopology(perAZ, topology) if err != nil { errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", 
p.ServiceType, resourceName, err)) } diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index 2a66d0956..4318f980a 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -127,8 +127,8 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro var errs []error for _, resourceName := range resourceNames { perAZ := resp.Resources[resourceName].PerAZ - toplogy := p.LiquidServiceInfo.Resources[resourceName].Topology - err := MatchLiquidReportToTopology(perAZ, toplogy) + topology := p.LiquidServiceInfo.Resources[resourceName].Topology + err := MatchLiquidReportToTopology(perAZ, topology) if err != nil { errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err)) } diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index 4c9fe8e72..cfe86818f 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -50,7 +50,7 @@ func CheckResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { logg.Error("missing topology on resource: %s", resourceName) } if !topology.IsValid() { - errs = append(errs, fmt.Errorf("invalid toplogy: %s on resource: %s", topology, resourceName)) + errs = append(errs, fmt.Errorf("invalid topology: %s on resource: %s", topology, resourceName)) } } if len(errs) > 0 { @@ -80,5 +80,5 @@ func MatchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone] } reportedAZs := SortMapKeys(perAZReport) - return fmt.Errorf("scrape with toplogy type: %s returned AZs: %v", topology, reportedAZs) + return fmt.Errorf("scrape with topology type: %s returned AZs: %v", topology, reportedAZs) } diff --git a/internal/test/plugins/quota_generic.go b/internal/test/plugins/quota_generic.go index c083feb86..891e4dd80 100644 --- a/internal/test/plugins/quota_generic.go +++ b/internal/test/plugins/quota_generic.go @@ -174,8 +174,8 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr var errs []error 
resourceNames := plugins.SortMapKeys(p.LiquidServiceInfo.Resources) for _, resourceName := range resourceNames { - toplogy := p.LiquidServiceInfo.Resources[resourceName].Topology - err := plugins.MatchLiquidReportToTopology(p.ReportedAZs, toplogy) + topology := p.LiquidServiceInfo.Resources[resourceName].Topology + err := plugins.MatchLiquidReportToTopology(p.ReportedAZs, topology) if err != nil { errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err)) } From 022983db9ce408d56b8ebdc5ab7a2a217b3ab872 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 16:46:56 +0100 Subject: [PATCH 24/42] Change core.Quota type to equivalent liquid.ResourceQuotaRequest type --- internal/collector/sync_quota_to_backend.go | 4 ++-- internal/core/plugin.go | 7 +------ internal/plugins/liquid.go | 8 ++++---- internal/plugins/nova.go | 8 ++++---- internal/test/plugins/quota_generic.go | 18 +++++++++--------- internal/test/plugins/quota_noop.go | 2 +- main.go | 4 ++-- 7 files changed, 23 insertions(+), 28 deletions(-) diff --git a/internal/collector/sync_quota_to_backend.go b/internal/collector/sync_quota_to_backend.go index 6c1cd5daa..36419b382 100644 --- a/internal/collector/sync_quota_to_backend.go +++ b/internal/collector/sync_quota_to_backend.go @@ -193,13 +193,13 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, if needsApply || azSeparatedNeedsApply { // double-check that we only include quota values for resources that the backend currently knows about - targetQuotasForBackend := make(map[liquid.ResourceName]core.Quotas) + targetQuotasForBackend := make(map[liquid.ResourceName]liquid.ResourceQuotaRequest) for resName, resInfo := range plugin.Resources() { if !resInfo.HasQuota { continue } //NOTE: If `targetQuotasInDB` does not have an entry for this resource, we will write 0 into the backend. 
- targetQuotasForBackend[resName] = core.Quotas{QuotaForResource: targetQuotasInDB[resName], QuotasForAZs: targetAZQuotasInDB[resName]} + targetQuotasForBackend[resName] = liquid.ResourceQuotaRequest{Quota: targetQuotasInDB[resName], PerAZ: targetAZQuotasInDB[resName]} } // apply quotas in backend diff --git a/internal/core/plugin.go b/internal/core/plugin.go index 316cde88c..e731f0463 100644 --- a/internal/core/plugin.go +++ b/internal/core/plugin.go @@ -141,7 +141,7 @@ type QuotaPlugin interface { // SetQuota updates the backend service's quotas for the given project in the // given domain to the values specified here. The map is guaranteed to contain // values for all resources defined by Resources(). - SetQuota(ctx context.Context, project KeystoneProject, quotas map[liquid.ResourceName]Quotas) error + SetQuota(ctx context.Context, project KeystoneProject, quotaReq map[liquid.ResourceName]liquid.ResourceQuotaRequest) error // Rates returns metadata for all the rates that this plugin scrapes // from the backend service. @@ -181,11 +181,6 @@ type QuotaPlugin interface { CollectMetrics(ch chan<- prometheus.Metric, project KeystoneProject, serializedMetrics []byte) error } -type Quotas struct { - QuotaForResource uint64 - QuotasForAZs map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest -} - // ServiceInfo is a reduced version of type limes.ServiceInfo, suitable for // being returned from func QuotaPlugin.ServiceInfo(). type ServiceInfo struct { diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index 4318f980a..53c1bfb59 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -194,12 +194,12 @@ func castSliceToAny[T any](input []T) (output []any) { } // SetQuota implements the core.QuotaPlugin interface. 
-func (p *liquidQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]core.Quotas) error { +func (p *liquidQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotaReq map[liquid.ResourceName]liquid.ResourceQuotaRequest) error { req := liquid.ServiceQuotaRequest{ - Resources: make(map[liquid.ResourceName]liquid.ResourceQuotaRequest, len(quotas)), + Resources: make(map[liquid.ResourceName]liquid.ResourceQuotaRequest, len(quotaReq)), } - for resName, quotas := range quotas { - req.Resources[resName] = liquid.ResourceQuotaRequest{Quota: quotas.QuotaForResource, PerAZ: quotas.QuotasForAZs} + for resName, request := range quotaReq { + req.Resources[resName] = liquid.ResourceQuotaRequest{Quota: request.Quota, PerAZ: request.PerAZ} } if p.LiquidServiceInfo.QuotaUpdateNeedsProjectMetadata { req.ProjectMetadata = project.ForLiquid() diff --git a/internal/plugins/nova.go b/internal/plugins/nova.go index 6b0eee8e7..839a78743 100644 --- a/internal/plugins/nova.go +++ b/internal/plugins/nova.go @@ -345,11 +345,11 @@ func (p *novaPlugin) pooledResourceName(hwVersion string, base liquid.ResourceNa } // SetQuota implements the core.QuotaPlugin interface. 
-func (p *novaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]core.Quotas) error { +func (p *novaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotaReq map[liquid.ResourceName]liquid.ResourceQuotaRequest) error { // translate Limes resource names for separate instance quotas into Nova quota names - novaQuotas := make(novaQuotaUpdateOpts, len(quotas)) - for resourceName, quotas := range quotas { - novaQuotas[string(resourceName)] = quotas.QuotaForResource + novaQuotas := make(novaQuotaUpdateOpts, len(quotaReq)) + for resourceName, request := range quotaReq { + novaQuotas[string(resourceName)] = request.Quota } return quotasets.Update(ctx, p.NovaV2, project.UUID, novaQuotas).Err diff --git a/internal/test/plugins/quota_generic.go b/internal/test/plugins/quota_generic.go index 891e4dd80..1b56e9345 100644 --- a/internal/test/plugins/quota_generic.go +++ b/internal/test/plugins/quota_generic.go @@ -45,12 +45,12 @@ func init() { // mostly reports static data and offers several controls to simulate failed // operations. 
type GenericQuotaPlugin struct { - ServiceType db.ServiceType `yaml:"-"` - LiquidServiceInfo liquid.ServiceInfo `yaml:"-"` - StaticRateInfos map[liquid.RateName]liquid.RateInfo `yaml:"rate_infos"` - StaticResourceData map[liquid.ResourceName]*core.ResourceData `yaml:"-"` - StaticResourceAttributes map[liquid.ResourceName]map[string]any `yaml:"-"` - OverrideQuota map[string]map[liquid.ResourceName]core.Quotas `yaml:"-"` // first key is project UUID + ServiceType db.ServiceType `yaml:"-"` + LiquidServiceInfo liquid.ServiceInfo `yaml:"-"` + StaticRateInfos map[liquid.RateName]liquid.RateInfo `yaml:"rate_infos"` + StaticResourceData map[liquid.ResourceName]*core.ResourceData `yaml:"-"` + StaticResourceAttributes map[liquid.ResourceName]map[string]any `yaml:"-"` + OverrideQuota map[string]map[liquid.ResourceName]liquid.ResourceQuotaRequest `yaml:"-"` // first key is project UUID // behavior flags that can be set by a unit test ReportedAZs map[liquid.AvailabilityZone]*any `yaml:"-"` ScrapeFails bool `yaml:"-"` @@ -78,7 +78,7 @@ func (p *GenericQuotaPlugin) Init(ctx context.Context, provider *gophercloud.Pro }, }, } - p.OverrideQuota = make(map[string]map[liquid.ResourceName]core.Quotas) + p.OverrideQuota = make(map[string]map[liquid.ResourceName]liquid.ResourceQuotaRequest) return nil } @@ -231,7 +231,7 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr if exists { for resourceName, quota := range data { resData := result[resourceName] - resData.Quota = int64(quota.QuotaForResource) //nolint:gosec // uint64 -> int64 would only fail if quota is bigger than 2^63 + resData.Quota = int64(quota.Quota) //nolint:gosec // uint64 -> int64 would only fail if quota is bigger than 2^63 result[resourceName] = resData } } @@ -259,7 +259,7 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr } // SetQuota implements the core.QuotaPlugin interface. 
-func (p *GenericQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]core.Quotas) error { +func (p *GenericQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]liquid.ResourceQuotaRequest) error { if p.SetQuotaFails { return errors.New("SetQuota failed as requested") } diff --git a/internal/test/plugins/quota_noop.go b/internal/test/plugins/quota_noop.go index 27d36e211..dbd4734d7 100644 --- a/internal/test/plugins/quota_noop.go +++ b/internal/test/plugins/quota_noop.go @@ -120,6 +120,6 @@ func (p *NoopQuotaPlugin) BuildServiceUsageRequest(project core.KeystoneProject, } // SetQuota implements the core.QuotaPlugin interface. -func (p *NoopQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]core.Quotas) error { +func (p *NoopQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]liquid.ResourceQuotaRequest) error { return nil } diff --git a/main.go b/main.go index 2632fa11a..b53782774 100644 --- a/main.go +++ b/main.go @@ -453,7 +453,7 @@ func taskTestSetQuota(ctx context.Context, cluster *core.Cluster, args []string) project := must.Return(findProjectForTesting(ctx, cluster, args[0])) quotaValueRx := regexp.MustCompile(`^([^=]+)=(\d+)$`) - quotaValues := make(map[liquid.ResourceName]core.Quotas) + quotaValues := make(map[liquid.ResourceName]liquid.ResourceQuotaRequest) for _, arg := range args[2:] { match := quotaValueRx.FindStringSubmatch(arg) if match == nil { @@ -463,7 +463,7 @@ func taskTestSetQuota(ctx context.Context, cluster *core.Cluster, args []string) if err != nil { logg.Fatal(err.Error()) } - quotaValues[liquid.ResourceName(match[1])] = core.Quotas{QuotaForResource: val} + quotaValues[liquid.ResourceName(match[1])] = liquid.ResourceQuotaRequest{Quota: val} } must.Succeed(cluster.QuotaPlugins[serviceType].SetQuota(ctx, project, quotaValues)) From 
463e9ea605a10805701f2723ce1cbd54263256bd Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 16:54:47 +0100 Subject: [PATCH 25/42] change UsageData quota attribute to a pointer type --- internal/collector/scrape.go | 2 +- internal/core/data.go | 2 +- internal/plugins/liquid.go | 2 +- internal/test/plugins/quota_generic.go | 10 ++++++---- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/internal/collector/scrape.go b/internal/collector/scrape.go index 6abb2ff05..2efc26345 100644 --- a/internal/collector/scrape.go +++ b/internal/collector/scrape.go @@ -311,7 +311,7 @@ func (c *Collector) writeResourceScrapeResult(dbDomain db.Domain, dbProject db.P // set AZ backend quota. resInfo := c.Cluster.InfoForResource(srv.Type, res.Name) if resInfo.Topology == liquid.AZSeparatedResourceTopology && resInfo.HasQuota { - azRes.BackendQuota = &data.Quota + azRes.BackendQuota = data.Quota } // reset backendQuota entries for topology changes if resInfo.Topology != liquid.AZSeparatedResourceTopology { diff --git a/internal/core/data.go b/internal/core/data.go index f06cf878e..7e3ed0ec5 100644 --- a/internal/core/data.go +++ b/internal/core/data.go @@ -190,7 +190,7 @@ func (r ResourceData) AddLocalizedUsage(az limes.AvailabilityZone, usage uint64) // UsageData contains usage data for a single project resource. // It appears in type ResourceData. 
type UsageData struct { - Quota int64 + Quota *int64 Usage uint64 PhysicalUsage *uint64 // only supported by some plugins Subresources []any // only if supported by plugin and enabled in config diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index 53c1bfb59..9779cd3be 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -160,7 +160,7 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro Subresources: castSliceToAny(azReport.Subresources), } if resInfo.Topology == liquid.AZSeparatedResourceTopology && azReport.Quota != nil { - resData.UsageData[az].Quota = *azReport.Quota + resData.UsageData[az].Quota = azReport.Quota } } diff --git a/internal/test/plugins/quota_generic.go b/internal/test/plugins/quota_generic.go index 1b56e9345..cb741ee2a 100644 --- a/internal/test/plugins/quota_generic.go +++ b/internal/test/plugins/quota_generic.go @@ -62,19 +62,21 @@ type GenericQuotaPlugin struct { // Init implements the core.QuotaPlugin interface. 
func (p *GenericQuotaPlugin) Init(ctx context.Context, provider *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, serviceType db.ServiceType) error { p.ServiceType = serviceType + thingsAZQuota := int64(21) + capacityAZQuota := int64(50) p.StaticResourceData = map[liquid.ResourceName]*core.ResourceData{ "things": { Quota: 42, UsageData: core.PerAZ[core.UsageData]{ - "az-one": {Usage: 2, Quota: 21}, - "az-two": {Usage: 2, Quota: 21}, + "az-one": {Usage: 2, Quota: &thingsAZQuota}, + "az-two": {Usage: 2, Quota: &thingsAZQuota}, }, }, "capacity": { Quota: 100, UsageData: core.PerAZ[core.UsageData]{ - "az-one": {Usage: 0, Quota: 50}, - "az-two": {Usage: 0, Quota: 50}, + "az-one": {Usage: 0, Quota: &capacityAZQuota}, + "az-two": {Usage: 0, Quota: &capacityAZQuota}, }, }, } From df05bc7e48bd2f8c0e52ea2e3d78fe8778e03311 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 17:03:29 +0100 Subject: [PATCH 26/42] fix MatchLiquidReportToTopology map type - fix unit test typing --- internal/collector/scrape_test.go | 11 +++++------ internal/plugins/utils.go | 2 +- internal/test/plugins/quota_generic.go | 10 +++++----- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/internal/collector/scrape_test.go b/internal/collector/scrape_test.go index 3fa28926b..0d38b05e1 100644 --- a/internal/collector/scrape_test.go +++ b/internal/collector/scrape_test.go @@ -599,10 +599,9 @@ func Test_TopologyScrapes(t *testing.T) { tr, tr0 := easypg.NewTracker(t, s.DB.Db) tr0.AssertEqualToFile("fixtures/scrape0.sql") - var status *any // positive: Sync az-separated quota values with the backend plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZSeparatedResourceTopology}} - plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"az-one": status, "az-two": status} + plugin.ReportedAZs = map[liquid.AvailabilityZone]struct{}{"az-one": {}, "az-two": {}} 
mustT(t, job.ProcessOne(s.Ctx, withLabel)) mustT(t, job.ProcessOne(s.Ctx, withLabel)) @@ -703,18 +702,18 @@ func Test_TopologyScrapes(t *testing.T) { // negative: scrape with flat topology returns invalid AZs plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.FlatResourceTopology}} - plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"az-one": status, "az-two": status} + plugin.ReportedAZs = map[liquid.AvailabilityZone]struct{}{"az-one": {}, "az-two": {}} mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with topology type: flat returned AZs: [az-one az-two]")) // negative: scrape with az-aware topology returns invalid any AZ plugin.LiquidServiceInfo.Resources["capacity"] = liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology} - plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"any": status} + plugin.ReportedAZs = map[liquid.AvailabilityZone]struct{}{"any": {}} mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: service: unittest, resource: capacity: scrape with topology type: az-aware returned AZs: [any]")) s.Clock.StepBy(scrapeInterval) // negative: scrape with az-separated topology returns invalid AZs any and unknown plugin.LiquidServiceInfo.Resources["capacity"] = liquid.ResourceInfo{Topology: liquid.AZSeparatedResourceTopology} - plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"az-one": status, "unknown": status} + plugin.ReportedAZs = map[liquid.AvailabilityZone]struct{}{"az-one": {}, "unknown": {}} mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with topology type: az-separated returned AZs: [az-one unknown]")) // negative: reject liquid initialization with invalid topologies @@ -724,6 +723,6 @@ func 
Test_TopologyScrapes(t *testing.T) { s.Clock.StepBy(scrapeInterval) // negative: multiple resources with mismatching topology to AZ responses plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZSeparatedResourceTopology}} - plugin.ReportedAZs = map[liquid.AvailabilityZone]*any{"unknown": status} + plugin.ReportedAZs = map[liquid.AvailabilityZone]struct{}{"unknown": {}} mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with topology type: az-separated returned AZs: [unknown]\nservice: unittest, resource: things: scrape with topology type: az-separated returned AZs: [unknown]")) } diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index cfe86818f..fd5dfd49d 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -59,7 +59,7 @@ func CheckResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { return } -func MatchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone]*V, topology liquid.ResourceTopology) (err error) { +func MatchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone]V, topology liquid.ResourceTopology) (err error) { _, anyExists := perAZReport[liquid.AvailabilityZoneAny] _, unknownExists := perAZReport[liquid.AvailabilityZoneUnknown] switch topology { diff --git a/internal/test/plugins/quota_generic.go b/internal/test/plugins/quota_generic.go index cb741ee2a..18f324adc 100644 --- a/internal/test/plugins/quota_generic.go +++ b/internal/test/plugins/quota_generic.go @@ -52,11 +52,11 @@ type GenericQuotaPlugin struct { StaticResourceAttributes map[liquid.ResourceName]map[string]any `yaml:"-"` OverrideQuota map[string]map[liquid.ResourceName]liquid.ResourceQuotaRequest `yaml:"-"` // first key is project UUID // behavior flags that can be set by a unit test - 
ReportedAZs map[liquid.AvailabilityZone]*any `yaml:"-"` - ScrapeFails bool `yaml:"-"` - SetQuotaFails bool `yaml:"-"` - MinQuota map[liquid.ResourceName]uint64 `yaml:"-"` - MaxQuota map[liquid.ResourceName]uint64 `yaml:"-"` + ReportedAZs map[liquid.AvailabilityZone]struct{} `yaml:"-"` + ScrapeFails bool `yaml:"-"` + SetQuotaFails bool `yaml:"-"` + MinQuota map[liquid.ResourceName]uint64 `yaml:"-"` + MaxQuota map[liquid.ResourceName]uint64 `yaml:"-"` } // Init implements the core.QuotaPlugin interface. From 3d2b1ddfd751ed289b3731eefeb86dc8751015fb Mon Sep 17 00:00:00 2001 From: Stefan Voigt <52136954+VoigtS@users.noreply.github.com> Date: Thu, 5 Dec 2024 17:07:29 +0100 Subject: [PATCH 27/42] Update internal/collector/scrape.go scrape now makes an explicit choice about how to fill backendQuota. Co-authored-by: Stefan Majewsky --- internal/collector/scrape.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/internal/collector/scrape.go b/internal/collector/scrape.go index 2efc26345..1715300ad 100644 --- a/internal/collector/scrape.go +++ b/internal/collector/scrape.go @@ -312,9 +312,7 @@ func (c *Collector) writeResourceScrapeResult(dbDomain db.Domain, dbProject db.P resInfo := c.Cluster.InfoForResource(srv.Type, res.Name) if resInfo.Topology == liquid.AZSeparatedResourceTopology && resInfo.HasQuota { azRes.BackendQuota = data.Quota - } - // reset backendQuota entries for topology changes - if resInfo.Topology != liquid.AZSeparatedResourceTopology { + } else { azRes.BackendQuota = nil } From f42f9ed801b8f9294743067add0170ac9c5c303e Mon Sep 17 00:00:00 2001 From: Stefan Voigt <52136954+VoigtS@users.noreply.github.com> Date: Thu, 5 Dec 2024 17:09:59 +0100 Subject: [PATCH 28/42] Update internal/datamodel/apply_computed_project_quota.go ACPQ: remove wrongly added allocated value for AZ separated base quota application. Unit test fixes follow. 
Co-authored-by: Stefan Majewsky --- internal/datamodel/apply_computed_project_quota.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/datamodel/apply_computed_project_quota.go b/internal/datamodel/apply_computed_project_quota.go index 425112ceb..f9d69a293 100644 --- a/internal/datamodel/apply_computed_project_quota.go +++ b/internal/datamodel/apply_computed_project_quota.go @@ -378,7 +378,7 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats // AZ separated topology receives the basequota to all available AZs if resInfo.Topology == liquid.AZSeparatedResourceTopology { for az := range isRelevantAZ { - target[az][resourceID].Desired = cfg.ProjectBaseQuota - target[az][resourceID].Allocated + target[az][resourceID].Desired = cfg.ProjectBaseQuota } } else { target[limes.AvailabilityZoneAny][resourceID].Desired = cfg.ProjectBaseQuota - sumOfLocalizedQuotas From 888a699d0fd0bcbbc473307186ed07e99f02e1d7 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 17:11:36 +0100 Subject: [PATCH 29/42] Fix unit tests for acpq for AZ separated --- internal/datamodel/apply_computed_project_quota_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/datamodel/apply_computed_project_quota_test.go b/internal/datamodel/apply_computed_project_quota_test.go index 3f922cdbd..567269476 100644 --- a/internal/datamodel/apply_computed_project_quota_test.go +++ b/internal/datamodel/apply_computed_project_quota_test.go @@ -208,7 +208,7 @@ func TestACPQBasicWithAZSeparated(t *testing.T) { 404: {Allocated: 20}, 405: {Allocated: 72}, // 60 * 1.2 = 72 406: {Allocated: 10}, // Basequota - 407: {Allocated: 7}, // 2 * 1.2 = 2.4 rounded to 3 (guaranteed minimum growth) -> Basquota: 10 - 3 = 7 + 407: {Allocated: 10}, // Basequota }, "az-two": { 401: {Allocated: 24}, @@ -217,7 +217,7 @@ func TestACPQBasicWithAZSeparated(t *testing.T) { 404: {Allocated: 15}, 405: {Allocated: 48}, // 40 * 1.2 = 48 406: 
{Allocated: 10}, // Basequota - 407: {Allocated: 8}, // 1 * 1.2 = 1.2 rounded to 2 (guaranteed minimum growth) -> Basequota: 10 - 2 = 8 + 407: {Allocated: 10}, // Basequota }, }) } From 9b87f31bbc038d5cdec6bdfeb04857b8cfe5dd5d Mon Sep 17 00:00:00 2001 From: VoigtS Date: Thu, 5 Dec 2024 19:41:23 +0100 Subject: [PATCH 30/42] scrape: create AZ manually in order to locate basequota if not delivered by the API --- internal/collector/scrape.go | 23 +++++++++++++------- internal/collector/scrape_test.go | 35 ++++++++++++++++++++++++++++--- 2 files changed, 47 insertions(+), 11 deletions(-) diff --git a/internal/collector/scrape.go b/internal/collector/scrape.go index 1715300ad..53495ef68 100644 --- a/internal/collector/scrape.go +++ b/internal/collector/scrape.go @@ -215,15 +215,22 @@ func (c *Collector) writeResourceScrapeResult(dbDomain db.Domain, dbProject db.P resourceData[resName] = resData } else { // AZ separated resources will not include "any" AZ. The basequota will be distributed towards the existing AZs. + // If an AZ is not available within the scrape response, it will be created to store the basequota. 
if resInfo.Topology == liquid.AZSeparatedResourceTopology { - continue - } - // for AZ-aware resources, ensure that we also have a ProjectAZResource in - // "any", because ApplyComputedProjectQuota needs somewhere to write base - // quotas into if enabled - _, exists := resData.UsageData[liquid.AvailabilityZoneAny] - if !exists { - resData.UsageData[liquid.AvailabilityZoneAny] = &core.UsageData{Usage: 0} + for _, availabilityZone := range c.Cluster.Config.AvailabilityZones { + _, exists := resData.UsageData[availabilityZone] + if !exists { + resData.UsageData[availabilityZone] = &core.UsageData{Usage: 0} + } + } + } else { + // for AZ-aware resources, ensure that we also have a ProjectAZResource in + // "any", because ApplyComputedProjectQuota needs somewhere to write base + // quotas into if enabled + _, exists := resData.UsageData[liquid.AvailabilityZoneAny] + if !exists { + resData.UsageData[liquid.AvailabilityZoneAny] = &core.UsageData{Usage: 0} + } } } } diff --git a/internal/collector/scrape_test.go b/internal/collector/scrape_test.go index 0d38b05e1..05ff6b238 100644 --- a/internal/collector/scrape_test.go +++ b/internal/collector/scrape_test.go @@ -683,6 +683,8 @@ func Test_TopologyScrapes(t *testing.T) { mustT(t, job.ProcessOne(s.Ctx, withLabel)) mustT(t, job.ProcessOne(s.Ctx, withLabel)) + checkedAt1 := s.Clock.Now().Add(-5 * time.Second) + checkedAt2 := s.Clock.Now() tr.DBChanges().AssertEqualf(` UPDATE project_az_resources SET backend_quota = 50 WHERE id = 1 AND resource_id = 1 AND az = 'az-one'; UPDATE project_az_resources SET backend_quota = NULL WHERE id = 13 AND resource_id = 6 AND az = 'az-one'; @@ -694,12 +696,39 @@ func Test_TopologyScrapes(t *testing.T) { UPDATE project_az_resources SET backend_quota = NULL WHERE id = 7 AND resource_id = 3 AND az = 'az-two'; UPDATE project_az_resources SET backend_quota = 50 WHERE id = 8 AND resource_id = 4 AND az = 'az-one'; UPDATE project_az_resources SET backend_quota = 50 WHERE id = 9 AND resource_id = 4 AND 
az = 'az-two'; - UPDATE project_services SET scraped_at = 1825, checked_at = 1825, next_scrape_at = 3625 WHERE id = 1 AND project_id = 1 AND type = 'unittest'; - UPDATE project_services SET scraped_at = 1830, checked_at = 1830, next_scrape_at = 3630 WHERE id = 2 AND project_id = 2 AND type = 'unittest'; - `) + UPDATE project_services SET scraped_at = %[1]d, checked_at = %[1]d, next_scrape_at = %[2]d WHERE id = 1 AND project_id = 1 AND type = 'unittest'; + UPDATE project_services SET scraped_at = %[3]d, checked_at = %[3]d, next_scrape_at = %[4]d WHERE id = 2 AND project_id = 2 AND type = 'unittest'; + `, + checkedAt1.Unix(), checkedAt1.Add(scrapeInterval).Unix(), + checkedAt2.Unix(), checkedAt2.Add(scrapeInterval).Unix(), + ) s.Clock.StepBy(scrapeInterval) + // positive: missing AZ in resource report will be created by the scraper in order to assign basequota later. + // warning: any AZs will be removed, because resource things switches from AZAware to AZSeparated. + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZSeparatedResourceTopology}} + delete(plugin.StaticResourceData["things"].UsageData, "az-two") + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + + checkedAt1 = s.Clock.Now().Add(-5 * time.Second) + checkedAt2 = s.Clock.Now() + tr.DBChanges().AssertEqualf(` + UPDATE project_az_resources SET backend_quota = 21 WHERE id = 13 AND resource_id = 6 AND az = 'az-one'; + UPDATE project_az_resources SET usage = 0, subresources = '', historical_usage = '{"t":[%[2]d,%[5]d],"v":[2,0]}' WHERE id = 14 AND resource_id = 6 AND az = 'az-two'; + DELETE FROM project_az_resources WHERE id = 15 AND resource_id = 3 AND az = 'any'; + DELETE FROM project_az_resources WHERE id = 16 AND resource_id = 6 AND az = 'any'; + UPDATE project_az_resources SET backend_quota = 21 WHERE id = 6 AND resource_id = 3 AND az = 'az-one'; 
+ UPDATE project_az_resources SET usage = 0, subresources = '', historical_usage = '{"t":[%[1]d,%[3]d],"v":[2,0]}' WHERE id = 7 AND resource_id = 3 AND az = 'az-two'; + UPDATE project_services SET scraped_at = %[3]d, serialized_metrics = '{"capacity_usage":0,"things_usage":2}', checked_at = %[3]d, next_scrape_at = %[4]d WHERE id = 1 AND project_id = 1 AND type = 'unittest'; + UPDATE project_services SET scraped_at = %[5]d, serialized_metrics = '{"capacity_usage":0,"things_usage":2}', checked_at = %[5]d, next_scrape_at = %[6]d WHERE id = 2 AND project_id = 2 AND type = 'unittest'; + `, + scrapedAt1.Unix(), scrapedAt2.Unix(), + checkedAt1.Unix(), checkedAt1.Add(scrapeInterval).Unix(), + checkedAt2.Unix(), checkedAt2.Add(scrapeInterval).Unix(), + ) + s.Clock.StepBy(scrapeInterval) // negative: scrape with flat topology returns invalid AZs plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.FlatResourceTopology}} plugin.ReportedAZs = map[liquid.AvailabilityZone]struct{}{"az-one": {}, "az-two": {}} From a5280953549add3184690d75f311c8dda13ddd38 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Fri, 6 Dec 2024 10:34:12 +0100 Subject: [PATCH 31/42] ensure that all ResourceInfo have a valid topology Because internal algorithms depend on a topology being chosen. 
--- internal/plugins/nova.go | 12 +++++++----- internal/plugins/utils.go | 7 ++++++- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/internal/plugins/nova.go b/internal/plugins/nova.go index 839a78743..823adb1ef 100644 --- a/internal/plugins/nova.go +++ b/internal/plugins/nova.go @@ -60,11 +60,11 @@ type novaPlugin struct { } var novaDefaultResources = map[liquid.ResourceName]liquid.ResourceInfo{ - "cores": {Unit: limes.UnitNone, HasQuota: true}, - "instances": {Unit: limes.UnitNone, HasQuota: true}, - "ram": {Unit: limes.UnitMebibytes, HasQuota: true}, - "server_groups": {Unit: limes.UnitNone, HasQuota: true}, - "server_group_members": {Unit: limes.UnitNone, HasQuota: true}, + "cores": {Unit: limes.UnitNone, HasQuota: true, Topology: liquid.AZAwareResourceTopology}, + "instances": {Unit: limes.UnitNone, HasQuota: true, Topology: liquid.AZAwareResourceTopology}, + "ram": {Unit: limes.UnitMebibytes, HasQuota: true, Topology: liquid.AZAwareResourceTopology}, + "server_groups": {Unit: limes.UnitNone, HasQuota: true, Topology: liquid.FlatResourceTopology}, + "server_group_members": {Unit: limes.UnitNone, HasQuota: true, Topology: liquid.FlatResourceTopology}, } func init() { @@ -121,6 +121,7 @@ func (p *novaPlugin) Init(ctx context.Context, provider *gophercloud.ProviderCli p.resources[liquid.ResourceName(resourceName)] = liquid.ResourceInfo{ Unit: unit, HasQuota: true, + Topology: liquid.AZAwareResourceTopology, } } @@ -134,6 +135,7 @@ func (p *novaPlugin) Init(ctx context.Context, provider *gophercloud.ProviderCli p.resources[resName] = liquid.ResourceInfo{ Unit: limes.UnitNone, HasQuota: true, + Topology: liquid.AZAwareResourceTopology, } } } diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index fd5dfd49d..6bca811c2 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -47,7 +47,12 @@ func CheckResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { for _, resourceName := range resourceNames { topology 
:= resources[resourceName].Topology if topology == "" { - logg.Error("missing topology on resource: %s", resourceName) + // several algorithms inside Limes depend on a topology being chosen, so we have to pick a default for now + // TODO: make this a fatal error once liquid-ceph has rolled out their Topology patch + logg.Error("missing topology on resource: %s (assuming %q)", resourceName, liquid.FlatResourceTopology) + resInfo := resources[resourceName] + resInfo.Topology = liquid.FlatResourceTopology + resources[resourceName] = resInfo } if !topology.IsValid() { errs = append(errs, fmt.Errorf("invalid topology: %s on resource: %s", topology, resourceName)) From 22dfbd75211c81ec9494acc52b00c6f2fc223ab2 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 6 Dec 2024 10:35:41 +0100 Subject: [PATCH 32/42] sync quota: set updated backendquotas on az level by resourceIDs instead of using service id using the service ID causes an update to all resources in this service disregarding the true topology. 
--- internal/collector/sync_quota_to_backend.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/internal/collector/sync_quota_to_backend.go b/internal/collector/sync_quota_to_backend.go index 36419b382..42b480106 100644 --- a/internal/collector/sync_quota_to_backend.go +++ b/internal/collector/sync_quota_to_backend.go @@ -25,6 +25,7 @@ import ( "fmt" "time" + "github.com/lib/pq" "github.com/prometheus/client_golang/prometheus" "github.com/sapcc/go-api-declarations/liquid" "github.com/sapcc/go-bits/jobloop" @@ -108,14 +109,9 @@ var ( WHERE service_id = $1 `) azQuotaSyncMarkResourcesAsAppliedQuery = sqlext.SimplifyWhitespace(` - WITH resourceIDs as ( - SELECT id - FROM project_resources - WHERE service_id = $1 - ) UPDATE project_az_resources SET backend_quota = quota - WHERE resource_id in (SELECT id from resourceIDs) + WHERE resource_id = ANY($1) `) quotaSyncMarkServiceAsAppliedQuery = sqlext.SimplifyWhitespace(` UPDATE project_services @@ -141,6 +137,7 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, targetAZQuotasInDB := make(map[liquid.ResourceName]map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest) needsApply := false azSeparatedNeedsApply := false + var azSeparatedResouceIDs []db.ProjectResourceID err := sqlext.ForeachRow(c.DB, quotaSyncSelectQuery, []any{srv.ID}, func(rows *sql.Rows) error { var ( resourceID db.ProjectResourceID @@ -175,6 +172,7 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, if (availabilityZone == liquid.AvailabilityZoneAny || availabilityZone == liquid.AvailabilityZoneUnknown) && currentAZQuota != nil { return fmt.Errorf("detected invalid AZ: %s for resource: %s with topology: %s has backend_quota: %v", availabilityZone, resourceName, resInfo.Topology, currentAZQuota) } + azSeparatedResouceIDs = append(azSeparatedResouceIDs, resourceID) targetAZQuotasInDB[resourceName] = make(map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest) 
targetAZQuotasInDB[resourceName][availabilityZone] = liquid.AZResourceQuotaRequest{Quota: targetAZQuota} if currentAZQuota == nil || *currentAZQuota < 0 || uint64(*currentAZQuota) != targetAZQuota { @@ -220,7 +218,7 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, return err } if azSeparatedNeedsApply { - _, err = c.DB.Exec(azQuotaSyncMarkResourcesAsAppliedQuery, srv.ID) + _, err = c.DB.Exec(azQuotaSyncMarkResourcesAsAppliedQuery, pq.Array(azSeparatedResouceIDs)) if err != nil { return err } From 2bbde929459b9eaa3478bd265b251d7faeadcde6 Mon Sep 17 00:00:00 2001 From: Stefan Voigt <52136954+VoigtS@users.noreply.github.com> Date: Fri, 6 Dec 2024 14:07:38 +0100 Subject: [PATCH 33/42] Update internal/collector/sync_quota_to_backend.go quota sync: avoid overwrite targetQuota map override from other AZs if the map is already initialized. Co-authored-by: Stefan Majewsky --- internal/collector/sync_quota_to_backend.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/collector/sync_quota_to_backend.go b/internal/collector/sync_quota_to_backend.go index 42b480106..24e4e2c6b 100644 --- a/internal/collector/sync_quota_to_backend.go +++ b/internal/collector/sync_quota_to_backend.go @@ -173,7 +173,9 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, return fmt.Errorf("detected invalid AZ: %s for resource: %s with topology: %s has backend_quota: %v", availabilityZone, resourceName, resInfo.Topology, currentAZQuota) } azSeparatedResouceIDs = append(azSeparatedResouceIDs, resourceID) - targetAZQuotasInDB[resourceName] = make(map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest) + if targetAZQuotasInDB[resourceName] == nil { + targetAZQuotasInDB[resourceName] = make(map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest) + } targetAZQuotasInDB[resourceName][availabilityZone] = liquid.AZResourceQuotaRequest{Quota: targetAZQuota} if currentAZQuota == nil || *currentAZQuota < 0 || 
uint64(*currentAZQuota) != targetAZQuota { azSeparatedNeedsApply = true From b0512eee813844bb8cd1acbc3107b54ada4332ab Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 6 Dec 2024 14:19:08 +0100 Subject: [PATCH 34/42] change sort function naming. Adjust set quota value assignment --- internal/plugins/capacity_liquid.go | 2 +- internal/plugins/liquid.go | 6 ++---- internal/plugins/utils.go | 6 +++--- internal/test/plugins/quota_generic.go | 2 +- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/internal/plugins/capacity_liquid.go b/internal/plugins/capacity_liquid.go index 7f1278fbb..b8a044f9e 100644 --- a/internal/plugins/capacity_liquid.go +++ b/internal/plugins/capacity_liquid.go @@ -94,7 +94,7 @@ func (p *liquidCapacityPlugin) Scrape(ctx context.Context, backchannel core.Capa logg.Fatal("ServiceInfo version for %s changed from %d to %d; restarting now to reload ServiceInfo...", p.LiquidServiceType, p.LiquidServiceInfo.Version, resp.InfoVersion) } - resourceNames := SortMapKeys(p.LiquidServiceInfo.Resources) + resourceNames := SortedMapKeys(p.LiquidServiceInfo.Resources) var errs []error for _, resourceName := range resourceNames { perAZ := resp.Resources[resourceName].PerAZ diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index 9779cd3be..8617313ab 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -123,7 +123,7 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro logg.Fatal("ServiceInfo version for %s changed from %d to %d; restarting now to reload ServiceInfo...", p.LiquidServiceType, p.LiquidServiceInfo.Version, resp.InfoVersion) } - resourceNames := SortMapKeys(p.LiquidServiceInfo.Resources) + resourceNames := SortedMapKeys(p.LiquidServiceInfo.Resources) var errs []error for _, resourceName := range resourceNames { perAZ := resp.Resources[resourceName].PerAZ @@ -198,9 +198,7 @@ func (p *liquidQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneP req := 
liquid.ServiceQuotaRequest{ Resources: make(map[liquid.ResourceName]liquid.ResourceQuotaRequest, len(quotaReq)), } - for resName, request := range quotaReq { - req.Resources[resName] = liquid.ResourceQuotaRequest{Quota: request.Quota, PerAZ: request.PerAZ} - } + req = liquid.ServiceQuotaRequest{Resources: quotaReq} if p.LiquidServiceInfo.QuotaUpdateNeedsProjectMetadata { req.ProjectMetadata = project.ForLiquid() } diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index 6bca811c2..944e97239 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -33,7 +33,7 @@ func p2u64(val uint64) *uint64 { return &val } -func SortMapKeys[M map[K]V, K ~string, V any](mapToSort M) []K { +func SortedMapKeys[M map[K]V, K ~string, V any](mapToSort M) []K { sortedKeys := slices.Collect(maps.Keys(mapToSort)) slices.Sort(sortedKeys) return sortedKeys @@ -43,7 +43,7 @@ func CheckResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { var errs []error resources := serviceInfo.Resources - resourceNames := SortMapKeys(resources) + resourceNames := SortedMapKeys(resources) for _, resourceName := range resourceNames { topology := resources[resourceName].Topology if topology == "" { @@ -84,6 +84,6 @@ func MatchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone] return } - reportedAZs := SortMapKeys(perAZReport) + reportedAZs := SortedMapKeys(perAZReport) return fmt.Errorf("scrape with topology type: %s returned AZs: %v", topology, reportedAZs) } diff --git a/internal/test/plugins/quota_generic.go b/internal/test/plugins/quota_generic.go index 18f324adc..1373b07c4 100644 --- a/internal/test/plugins/quota_generic.go +++ b/internal/test/plugins/quota_generic.go @@ -174,7 +174,7 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr if len(p.ReportedAZs) > 0 { var errs []error - resourceNames := plugins.SortMapKeys(p.LiquidServiceInfo.Resources) + resourceNames := 
plugins.SortedMapKeys(p.LiquidServiceInfo.Resources) for _, resourceName := range resourceNames { topology := p.LiquidServiceInfo.Resources[resourceName].Topology err := plugins.MatchLiquidReportToTopology(p.ReportedAZs, topology) From 202959c74850a8d1012a95bd72913432ad2db348 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 6 Dec 2024 14:22:15 +0100 Subject: [PATCH 35/42] Set quota: fix request initialization --- internal/plugins/liquid.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index 8617313ab..e91301186 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -195,10 +195,7 @@ func castSliceToAny[T any](input []T) (output []any) { // SetQuota implements the core.QuotaPlugin interface. func (p *liquidQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotaReq map[liquid.ResourceName]liquid.ResourceQuotaRequest) error { - req := liquid.ServiceQuotaRequest{ - Resources: make(map[liquid.ResourceName]liquid.ResourceQuotaRequest, len(quotaReq)), - } - req = liquid.ServiceQuotaRequest{Resources: quotaReq} + req := liquid.ServiceQuotaRequest{Resources: quotaReq} if p.LiquidServiceInfo.QuotaUpdateNeedsProjectMetadata { req.ProjectMetadata = project.ForLiquid() } From ba075b73ad5052b1afb8e32a828d6a9e10db874d Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 6 Dec 2024 14:41:52 +0100 Subject: [PATCH 36/42] acpq tests: each case now provides a specific toplogy --- .../apply_computed_project_quota_test.go | 44 +++++++------------ 1 file changed, 17 insertions(+), 27 deletions(-) diff --git a/internal/datamodel/apply_computed_project_quota_test.go b/internal/datamodel/apply_computed_project_quota_test.go index 567269476..b0013989e 100644 --- a/internal/datamodel/apply_computed_project_quota_test.go +++ b/internal/datamodel/apply_computed_project_quota_test.go @@ -72,7 +72,7 @@ func TestACPQBasicWithoutAZAwareness(t *testing.T) { 405: {Allocated: 10}, 
406: {Allocated: 10}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) } } @@ -157,7 +157,7 @@ func TestACPQBasicWithAZAwareness(t *testing.T) { 406: {Allocated: 10}, 407: {Allocated: 5}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) } } @@ -200,7 +200,7 @@ func TestACPQBasicWithAZSeparated(t *testing.T) { } for _, cfg.AllowQuotaOvercommitUntilAllocatedPercent = range []float64{0, 10000} { - expectACPQResultForAZSeparated(t, input, cfg, nil, acpqGlobalTarget{ + expectACPQResult(t, input, cfg, nil, acpqGlobalTarget{ "az-one": { 401: {Allocated: 24}, 402: {Allocated: 24}, @@ -219,7 +219,7 @@ func TestACPQBasicWithAZSeparated(t *testing.T) { 406: {Allocated: 10}, // Basequota 407: {Allocated: 10}, // Basequota }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZSeparatedResourceTopology}) } } @@ -262,7 +262,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 5}, 405: {Allocated: 5}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) // Stage 2: There is enough capacity for the minimum quotas, but not for the // desired quotas. @@ -281,7 +281,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 0}, 405: {Allocated: 0}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) // Stage 3: There is enough capacity for the hard minimum quotas, but not for // the soft minimum quotas. @@ -300,7 +300,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 0}, 405: {Allocated: 0}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) // Stage 4: Capacity is SOMEHOW not even enough for the hard minimum quotas. 
input["any"] = clusterAZAllocationStats{ @@ -318,7 +318,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 0}, 405: {Allocated: 0}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) } func TestACPQQuotaOvercommitTurnsOffAboveAllocationThreshold(t *testing.T) { @@ -369,7 +369,7 @@ func TestACPQQuotaOvercommitTurnsOffAboveAllocationThreshold(t *testing.T) { 404: {Allocated: 10}, 405: {Allocated: 10}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) // test with quota overcommit forbidden (85% allocation is above 80%) cfg.AllowQuotaOvercommitUntilAllocatedPercent = 80 @@ -389,7 +389,7 @@ func TestACPQQuotaOvercommitTurnsOffAboveAllocationThreshold(t *testing.T) { 404: {}, 405: {}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) } func TestACPQWithProjectLocalQuotaConstraints(t *testing.T) { @@ -437,7 +437,7 @@ func TestACPQWithProjectLocalQuotaConstraints(t *testing.T) { 401: {Allocated: 36}, 402: {Allocated: 16}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) // test with MinQuota constraints // @@ -463,7 +463,7 @@ func TestACPQWithProjectLocalQuotaConstraints(t *testing.T) { 401: {Allocated: 0}, 402: {Allocated: 16}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) // test with MaxQuota constraints that constrain the soft minimum (hard minimum is not constrainable) constraints = map[db.ProjectResourceID]projectLocalQuotaConstraints{ @@ -483,7 +483,7 @@ func TestACPQWithProjectLocalQuotaConstraints(t *testing.T) { 401: {Allocated: 0}, 402: {Allocated: 0}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) // test with MaxQuota constraints that constrain the base quota constraints = map[db.ProjectResourceID]projectLocalQuotaConstraints{ @@ -503,7 +503,7 @@ func TestACPQWithProjectLocalQuotaConstraints(t *testing.T) { 401: {Allocated: 26}, 402: {Allocated: 6}, }, - }) + }, 
liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) } func TestEmptyRegionDoesNotPrecludeQuotaOvercommit(t *testing.T) { @@ -593,7 +593,7 @@ func TestEmptyRegionDoesNotPrecludeQuotaOvercommit(t *testing.T) { 404: {Allocated: 5}, 405: {Allocated: 5}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) } // Shortcut to avoid repetition in projectAZAllocationStats literals. @@ -610,19 +610,9 @@ func withCommitted(committed uint64, stats projectAZAllocationStats) projectAZAl return stats } -func expectACPQResultForAZSeparated(t *testing.T, input map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints, expected acpqGlobalTarget) { - t.Helper() - actual, _ := acpqComputeQuotas(input, cfg, constraints, liquid.ResourceInfo{Topology: liquid.AZSeparatedResourceTopology}) - createACPQResult(t, input, cfg, actual, expected) -} - -func expectACPQResult(t *testing.T, input map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints, expected acpqGlobalTarget) { +func expectACPQResult(t *testing.T, input map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints, expected acpqGlobalTarget, resourceInfo liquid.ResourceInfo) { t.Helper() - actual, _ := acpqComputeQuotas(input, cfg, constraints, liquid.ResourceInfo{}) - createACPQResult(t, input, cfg, actual, expected) -} - -func createACPQResult(t *testing.T, input map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, actual, expected acpqGlobalTarget) { + actual, _ := acpqComputeQuotas(input, cfg, constraints, resourceInfo) // normalize away any left-over intermediate values for _, azTarget := range actual { for _, 
projectTarget := range azTarget { From b1e3d831831c7dacae3d1ba3035b500c9c9bcc50 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 6 Dec 2024 14:44:37 +0100 Subject: [PATCH 37/42] plugins: adjust failing MatchLiquidReport error message --- internal/plugins/capacity_liquid.go | 2 +- internal/plugins/liquid.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/plugins/capacity_liquid.go b/internal/plugins/capacity_liquid.go index b8a044f9e..b487cf456 100644 --- a/internal/plugins/capacity_liquid.go +++ b/internal/plugins/capacity_liquid.go @@ -101,7 +101,7 @@ func (p *liquidCapacityPlugin) Scrape(ctx context.Context, backchannel core.Capa topology := p.LiquidServiceInfo.Resources[resourceName].Topology err := MatchLiquidReportToTopology(perAZ, topology) if err != nil { - errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err)) + errs = append(errs, fmt.Errorf("resource: %s: %w", resourceName, err)) } } if len(errs) > 0 { diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index e91301186..c6105de54 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -130,7 +130,7 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro topology := p.LiquidServiceInfo.Resources[resourceName].Topology err := MatchLiquidReportToTopology(perAZ, topology) if err != nil { - errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err)) + errs = append(errs, fmt.Errorf("resource: %s: %w", resourceName, err)) } } if len(errs) > 0 { From f564760fb1c7667cc1b9639b32aba576ab088727 Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 6 Dec 2024 15:56:52 +0100 Subject: [PATCH 38/42] ACPQ: Do not set resource acpq quota if topology is AZ separated.
This would otherwise lead to unnecessary quota syncs --- internal/datamodel/apply_computed_project_quota.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/datamodel/apply_computed_project_quota.go b/internal/datamodel/apply_computed_project_quota.go index f9d69a293..87ae63bf7 100644 --- a/internal/datamodel/apply_computed_project_quota.go +++ b/internal/datamodel/apply_computed_project_quota.go @@ -189,6 +189,11 @@ func ApplyComputedProjectQuota(serviceType db.ServiceType, resourceName liquid.R } servicesWithUpdatedQuota := make(map[db.ProjectServiceID]struct{}) err = sqlext.WithPreparedStatement(tx, acpqUpdateProjectQuotaQuery, func(stmt *sql.Stmt) error { + // Skip resources with AZSeparated toplogy. The quota scrape would receive a resource nil value, while ACPQ calculates qouta. + // This would lead to unnecessary quota syncs with the backend, because backendQuota != quota. + if resInfo.Topology == liquid.AZSeparatedResourceTopology { + return nil + } for resourceID, quota := range quotasByResourceID { var serviceID db.ProjectServiceID err := stmt.QueryRow(quota, resourceID).Scan(&serviceID) From 3f3a8a02989138bb95da743546a9c37bcacaf915 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Fri, 6 Dec 2024 16:38:21 +0100 Subject: [PATCH 39/42] review: fix topology for non-AZ-aware ACPQ tests --- .../datamodel/apply_computed_project_quota_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/datamodel/apply_computed_project_quota_test.go b/internal/datamodel/apply_computed_project_quota_test.go index b0013989e..c8503e7df 100644 --- a/internal/datamodel/apply_computed_project_quota_test.go +++ b/internal/datamodel/apply_computed_project_quota_test.go @@ -72,7 +72,7 @@ func TestACPQBasicWithoutAZAwareness(t *testing.T) { 405: {Allocated: 10}, 406: {Allocated: 10}, }, - }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) + }, liquid.ResourceInfo{Topology: liquid.FlatResourceTopology}) } } @@ -262,7
+262,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 5}, 405: {Allocated: 5}, }, - }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) + }, liquid.ResourceInfo{Topology: liquid.FlatResourceTopology}) // Stage 2: There is enough capacity for the minimum quotas, but not for the // desired quotas. @@ -281,7 +281,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 0}, 405: {Allocated: 0}, }, - }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) + }, liquid.ResourceInfo{Topology: liquid.FlatResourceTopology}) // Stage 3: There is enough capacity for the hard minimum quotas, but not for // the soft minimum quotas. @@ -300,7 +300,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 0}, 405: {Allocated: 0}, }, - }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) + }, liquid.ResourceInfo{Topology: liquid.FlatResourceTopology}) // Stage 4: Capacity is SOMEHOW not even enough for the hard minimum quotas. 
input["any"] = clusterAZAllocationStats{ @@ -318,7 +318,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 0}, 405: {Allocated: 0}, }, - }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) + }, liquid.ResourceInfo{Topology: liquid.FlatResourceTopology}) } func TestACPQQuotaOvercommitTurnsOffAboveAllocationThreshold(t *testing.T) { From 32771f11f8166cb85b9a22c144dccec1e2ebb981 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Fri, 6 Dec 2024 16:38:50 +0100 Subject: [PATCH 40/42] review: fix typos and whitespace --- internal/collector/sync_quota_to_backend.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/collector/sync_quota_to_backend.go b/internal/collector/sync_quota_to_backend.go index 24e4e2c6b..470382e59 100644 --- a/internal/collector/sync_quota_to_backend.go +++ b/internal/collector/sync_quota_to_backend.go @@ -100,7 +100,7 @@ var ( `) azQuotaSyncSelectQuery = sqlext.SimplifyWhitespace(` SELECT az, backend_quota, quota - FROM project_az_resources + FROM project_az_resources WHERE resource_id = $1 AND quota IS NOT NULL `) quotaSyncMarkResourcesAsAppliedQuery = sqlext.SimplifyWhitespace(` @@ -111,7 +111,7 @@ var ( azQuotaSyncMarkResourcesAsAppliedQuery = sqlext.SimplifyWhitespace(` UPDATE project_az_resources SET backend_quota = quota - WHERE resource_id = ANY($1) + WHERE resource_id = ANY($1) `) quotaSyncMarkServiceAsAppliedQuery = sqlext.SimplifyWhitespace(` UPDATE project_services @@ -137,7 +137,7 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, targetAZQuotasInDB := make(map[liquid.ResourceName]map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest) needsApply := false azSeparatedNeedsApply := false - var azSeparatedResouceIDs []db.ProjectResourceID + var azSeparatedResourceIDs []db.ProjectResourceID err := sqlext.ForeachRow(c.DB, quotaSyncSelectQuery, []any{srv.ID}, func(rows *sql.Rows) error { var ( resourceID db.ProjectResourceID 
@@ -172,7 +172,7 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, if (availabilityZone == liquid.AvailabilityZoneAny || availabilityZone == liquid.AvailabilityZoneUnknown) && currentAZQuota != nil { return fmt.Errorf("detected invalid AZ: %s for resource: %s with topology: %s has backend_quota: %v", availabilityZone, resourceName, resInfo.Topology, currentAZQuota) } - azSeparatedResouceIDs = append(azSeparatedResouceIDs, resourceID) + azSeparatedResourceIDs = append(azSeparatedResourceIDs, resourceID) if targetAZQuotasInDB[resourceName] == nil { targetAZQuotasInDB[resourceName] = make(map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest) } @@ -220,7 +220,7 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, return err } if azSeparatedNeedsApply { - _, err = c.DB.Exec(azQuotaSyncMarkResourcesAsAppliedQuery, pq.Array(azSeparatedResouceIDs)) + _, err = c.DB.Exec(azQuotaSyncMarkResourcesAsAppliedQuery, pq.Array(azSeparatedResourceIDs)) if err != nil { return err } From 717a02832ecaa78cc2e5616a460c3be27b84c13b Mon Sep 17 00:00:00 2001 From: VoigtS Date: Fri, 6 Dec 2024 17:51:38 +0100 Subject: [PATCH 41/42] ACPQ: prepare servicesWithUpdatedQuota for AZSeparated Topology.
--- internal/datamodel/apply_computed_project_quota.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/internal/datamodel/apply_computed_project_quota.go b/internal/datamodel/apply_computed_project_quota.go index 87ae63bf7..2142f165d 100644 --- a/internal/datamodel/apply_computed_project_quota.go +++ b/internal/datamodel/apply_computed_project_quota.go @@ -165,6 +165,7 @@ func ApplyComputedProjectQuota(serviceType db.ServiceType, resourceName liquid.R } // write new AZ quotas to database + servicesWithUpdatedQuota := make(map[db.ProjectServiceID]struct{}) err = sqlext.WithPreparedStatement(tx, acpqUpdateAZQuotaQuery, func(stmt *sql.Stmt) error { for az, azTarget := range target { for resourceID, projectTarget := range azTarget { @@ -172,6 +173,15 @@ func ApplyComputedProjectQuota(serviceType db.ServiceType, resourceName liquid.R if err != nil { return fmt.Errorf("in AZ %s in project resource %d: %w", az, resourceID, err) } + // AZSeparated Toplogy does not update resource quota. Therefore the service desync will be prepared right here. + if resInfo.Topology == liquid.AZSeparatedResourceTopology { + var serviceID db.ProjectServiceID + err := tx.SelectOne(&serviceID, `SELECT service_id FROM project_resources WHERE id = $1`, resourceID) + if err != nil { + return fmt.Errorf("in project resource %d: %w", resourceID, err) + } + servicesWithUpdatedQuota[serviceID] = struct{}{} + } } } return nil @@ -187,7 +197,7 @@ func ApplyComputedProjectQuota(serviceType db.ServiceType, resourceName liquid.R quotasByResourceID[resourceID] += projectTarget.Allocated } } - servicesWithUpdatedQuota := make(map[db.ProjectServiceID]struct{}) + err = sqlext.WithPreparedStatement(tx, acpqUpdateProjectQuotaQuery, func(stmt *sql.Stmt) error { // Skip resources with AZSeparated toplogy. The quota scrape would receive a resource nil value, while ACPQ calculates qouta. // This would lead to unnecessary quota syncs with the backend, because backendQuota != quota. 
From 8e72ed5a9504db918c3a240c6f005d5730323e34 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Mon, 9 Dec 2024 15:24:01 +0100 Subject: [PATCH 42/42] review: fix typos and whitespace --- internal/datamodel/apply_computed_project_quota.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/datamodel/apply_computed_project_quota.go b/internal/datamodel/apply_computed_project_quota.go index 2142f165d..4bddbcb6a 100644 --- a/internal/datamodel/apply_computed_project_quota.go +++ b/internal/datamodel/apply_computed_project_quota.go @@ -47,7 +47,7 @@ var ( WHERE ps.type = $1 AND pr.name = $2 AND (pr.min_quota_from_backend IS NOT NULL OR pr.max_quota_from_backend IS NOT NULL OR pr.max_quota_from_outside_admin IS NOT NULL - OR pr.max_quota_from_local_admin IS NOT NULL + OR pr.max_quota_from_local_admin IS NOT NULL OR pr.override_quota_from_config IS NOT NULL) `) @@ -173,7 +173,7 @@ func ApplyComputedProjectQuota(serviceType db.ServiceType, resourceName liquid.R if err != nil { return fmt.Errorf("in AZ %s in project resource %d: %w", az, resourceID, err) } - // AZSeparated Toplogy does not update resource quota. Therefore the service desync will be prepared right here. + // AZSeparatedResourceTopology does not update resource quota. Therefore the service desync needs to be queued right here. if resInfo.Topology == liquid.AZSeparatedResourceTopology { var serviceID db.ProjectServiceID err := tx.SelectOne(&serviceID, `SELECT service_id FROM project_resources WHERE id = $1`, resourceID) if err != nil { return fmt.Errorf("in project resource %d: %w", resourceID, err) } servicesWithUpdatedQuota[serviceID] = struct{}{} } @@ -199,7 +199,7 @@ func ApplyComputedProjectQuota(serviceType db.ServiceType, resourceName liquid.R } err = sqlext.WithPreparedStatement(tx, acpqUpdateProjectQuotaQuery, func(stmt *sql.Stmt) error { - // Skip resources with AZSeparated toplogy. The quota scrape would receive a resource nil value, while ACPQ calculates qouta. + // Skip resources with AZSeparatedResourceTopology. The quota scrape would receive a resource nil value, while ACPQ calculates quota.
// This would lead to unnecessary quota syncs with the backend, because backendQuota != quota. if resInfo.Topology == liquid.AZSeparatedResourceTopology { return nil