diff --git a/docs/liquids/nova.md b/docs/liquids/nova.md
new file mode 100644
index 000000000..0883bf520
--- /dev/null
+++ b/docs/liquids/nova.md
@@ -0,0 +1,41 @@
+# Liquid: `nova`
+
+This liquid provides support for the compute service Nova.
+
+- The suggested service type is `liquid-nova`.
+- The suggested area is `compute`.
+
+## Service-specific configuration
+
+| Field | Type | Description |
+| ----- | ---- | ----------- |
+| `hypervisor_selection.hypervisor_type_pattern` | regexp | Only match hypervisors with a hypervisor_type attribute matching this pattern. |
+| `hypervisor_selection.required_traits` | []string | Only match hypervisors whose resource providers have all of the traits without a `!` prefix and none of the traits with a `!` prefix. |
+| `hypervisor_selection.shadowing_traits` | []string | Hypervisors matching any of these trait rules (same matching logic as for `required_traits`) are considered shadowed; their capacity is not counted. |
+| `hypervisor_selection.aggregate_name_pattern` | regexp | Only match hypervisors that reside in an aggregate matching this pattern. If a hypervisor resides in multiple matching aggregates, an error is raised. |
+| `flavor_selection.required_extra_specs` | map[string]string | Only match flavors that have all of these extra specs. |
+| `flavor_selection.excluded_extra_specs` | map[string]string | Exclude flavors that have any of these extra specs. |
+| `pooled_cores_resource` | string | Name of the pooled cores resource. |
+| `pooled_instances_resource` | string | Name of the pooled instances resource. |
+| `pooled_ram_resource` | string | Name of the pooled RAM resource. |
+| `with_subcapacities` | boolean | If true, subcapacities are reported. |
+| `with_subresources` | boolean | If true, subresources are reported. |
+| `binpack_behavior.score_ignores_cores`<br>`binpack_behavior.score_ignores_disk`<br>`binpack_behavior.score_ignores_ram` | boolean | If true, when ranking nodes during placement, do not include the respective dimension in the score. |
+| `ignore_traits` | []string | Extra specs with the `trait:` prefix that are ignored when verifying that all pooled flavors agree on the same trait-match extra specs. |
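+
+For orientation, here is a sketch of a possible configuration (all values are illustrative, not recommendations):
+
+```json
+{
+  "hypervisor_selection": {
+    "hypervisor_type_pattern": "vmware",
+    "required_traits": ["COMPUTE_STATUS_ENABLED", "!CUSTOM_DECOMMISSIONED"],
+    "shadowing_traits": ["CUSTOM_SHADOWED"],
+    "aggregate_name_pattern": "^vc-[a-z]+-[0-9]+$"
+  },
+  "flavor_selection": {
+    "required_extra_specs": {"vmware:hv_enabled": "True"},
+    "excluded_extra_specs": {"custom:flavor_class": "deprecated"}
+  },
+  "pooled_cores_resource": "cores",
+  "pooled_instances_resource": "instances",
+  "pooled_ram_resource": "ram",
+  "with_subcapacities": true,
+  "with_subresources": true,
+  "binpack_behavior": {
+    "score_ignores_cores": false,
+    "score_ignores_disk": true,
+    "score_ignores_ram": false
+  },
+  "ignore_traits": ["trait:CUSTOM_HW_FOO"]
+}
+```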
+
+## Resources
+
+TODO: @majewsky please assist here
+
+| Resource | Unit | Capabilities |
+| --- | --- | --- |
+| `cores` | None | HasCapacity = true, HasQuota = true |
+| `ram` | MiB | HasCapacity = true, HasQuota = true |
+| `instances` | None | HasCapacity = true, HasQuota = true |
+| `server_groups` | None | HasCapacity = false, HasQuota = true |
+| `server_group_members` | None | HasCapacity = false, HasQuota = true |
+| `instances_$FLAVOR_NAME` | None | HasCapacity = true, HasQuota = true |
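+
+For example, a hypothetical split flavor `m1.large` would be reported as the resource `instances_m1.large`. "Split flavors" are those with a separate instance quota, as opposed to "pooled flavors" that share the common `cores`/`instances`/`ram` pool.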
+
+## Capacity calculation
+
+TODO: @majewsky please assist here
\ No newline at end of file
diff --git a/internal/liquids/ironic/liquid.go b/internal/liquids/ironic/liquid.go
index 3ef7fe892..e5525b80f 100644
--- a/internal/liquids/ironic/liquid.go
+++ b/internal/liquids/ironic/liquid.go
@@ -32,7 +32,7 @@ import (
"github.com/sapcc/go-api-declarations/liquid"
"github.com/sapcc/limes/internal/liquids"
- "github.com/sapcc/limes/internal/plugins/nova"
+ "github.com/sapcc/limes/internal/liquids/nova"
)
type Logic struct {
diff --git a/internal/plugins/nova/binpack_simulation.go b/internal/liquids/nova/binpack_simulation.go
similarity index 97%
rename from internal/plugins/nova/binpack_simulation.go
rename to internal/liquids/nova/binpack_simulation.go
index 154372c86..e1e6f3865 100644
--- a/internal/plugins/nova/binpack_simulation.go
+++ b/internal/liquids/nova/binpack_simulation.go
@@ -28,19 +28,19 @@ import (
"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/flavors"
"github.com/gophercloud/gophercloud/v2/openstack/placement/v1/resourceproviders"
- "github.com/sapcc/go-api-declarations/limes"
"github.com/sapcc/go-api-declarations/liquid"
"github.com/sapcc/go-bits/logg"
)
+// TODO: Remove yaml tags when switching to liquid-nova
// BinpackBehavior contains configuration parameters for the binpack simulation.
type BinpackBehavior struct {
// When ranking nodes during placement, do not include the VCPU count dimension in the score.
- ScoreIgnoresCores bool `yaml:"score_ignores_cores"`
+ ScoreIgnoresCores bool `yaml:"score_ignores_cores" json:"score_ignores_cores"`
// When ranking nodes during placement, do not include the disk size dimension in the score.
- ScoreIgnoresDisk bool `yaml:"score_ignores_disk"`
+ ScoreIgnoresDisk bool `yaml:"score_ignores_disk" json:"score_ignores_disk"`
// When ranking nodes during placement, do not include the RAM size dimension in the score.
- ScoreIgnoresRAM bool `yaml:"score_ignores_ram"`
+ ScoreIgnoresRAM bool `yaml:"score_ignores_ram" json:"score_ignores_ram"`
}
// BinpackHypervisor models an entire Nova hypervisor for the purposes of the
@@ -146,7 +146,7 @@ func PrepareHypervisorForBinpacking(h MatchingHypervisor) (BinpackHypervisor, er
}
// RenderDebugView prints an overview of the placements in this hypervisor on several logg.Debug lines.
-func (h BinpackHypervisor) RenderDebugView(az limes.AvailabilityZone) {
+func (h BinpackHypervisor) RenderDebugView(az liquid.AvailabilityZone) {
shortID := h.Match.Hypervisor.Service.Host
logg.Debug("[%s][%s] %s", az, shortID, h.Match.Hypervisor.Description())
for idx, n := range h.Nodes {
diff --git a/internal/liquids/nova/capacity.go b/internal/liquids/nova/capacity.go
new file mode 100644
index 000000000..d4e415f61
--- /dev/null
+++ b/internal/liquids/nova/capacity.go
@@ -0,0 +1,572 @@
+/*******************************************************************************
+*
+* Copyright 2019-2024 SAP SE
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You should have received a copy of the License along with this
+* program. If not, you may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+*******************************************************************************/
+
+package nova
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "slices"
+ "strings"
+
+ "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/flavors"
+ "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers"
+ "github.com/sapcc/go-api-declarations/liquid"
+ "github.com/sapcc/go-bits/logg"
+
+ "github.com/sapcc/limes/internal/core"
+ "github.com/sapcc/limes/internal/liquids"
+)
+
+// PartialCapacity describes compute capacity at a level below the entire
+// cluster (e.g. for a single hypervisor, aggregate or AZ).
+type PartialCapacity struct {
+ VCPUs PartialCapacityMetric
+ MemoryMB PartialCapacityMetric
+ LocalGB PartialCapacityMetric
+ RunningVMs uint64
+ MatchingAggregates map[string]bool
+ Subcapacities []any // only filled on AZ level
+}
+
+func (c *PartialCapacity) Add(other PartialCapacity) {
+ c.VCPUs.Capacity += other.VCPUs.Capacity
+ c.VCPUs.Usage += other.VCPUs.Usage
+ c.MemoryMB.Capacity += other.MemoryMB.Capacity
+ c.MemoryMB.Usage += other.MemoryMB.Usage
+ c.LocalGB.Capacity += other.LocalGB.Capacity
+ c.LocalGB.Usage += other.LocalGB.Usage
+ c.RunningVMs += other.RunningVMs
+
+ if c.MatchingAggregates == nil {
+ c.MatchingAggregates = make(map[string]bool)
+ }
+ for aggrName, matches := range other.MatchingAggregates {
+ if matches {
+ c.MatchingAggregates[aggrName] = true
+ }
+ }
+}
+
+func (c PartialCapacity) CappedToUsage() PartialCapacity {
+ return PartialCapacity{
+ VCPUs: c.VCPUs.CappedToUsage(),
+ MemoryMB: c.MemoryMB.CappedToUsage(),
+ LocalGB: c.LocalGB.CappedToUsage(),
+ RunningVMs: c.RunningVMs,
+ MatchingAggregates: c.MatchingAggregates,
+ Subcapacities: c.Subcapacities,
+ }
+}
+
+// TODO: Remove when switching to liquid-nova
+func (c PartialCapacity) DeprecatedIntoCapacityData(resourceName string, maxRootDiskSize float64, subcapacities []any) core.CapacityData { //nolint:dupl
+ switch resourceName {
+ case "cores":
+ return core.CapacityData{
+ Capacity: c.VCPUs.Capacity,
+ Usage: &c.VCPUs.Usage,
+ Subcapacities: subcapacities,
+ }
+ case "ram":
+ return core.CapacityData{
+ Capacity: c.MemoryMB.Capacity,
+ Usage: &c.MemoryMB.Usage,
+ Subcapacities: subcapacities,
+ }
+ case "instances":
+ amount := 10000 * uint64(len(c.MatchingAggregates))
+ if maxRootDiskSize != 0 {
+ maxAmount := uint64(float64(c.LocalGB.Capacity) / maxRootDiskSize)
+ if amount > maxAmount {
+ amount = maxAmount
+ }
+ }
+ return core.CapacityData{
+ Capacity: amount,
+ Usage: &c.RunningVMs,
+ Subcapacities: subcapacities,
+ }
+ default:
+ panic(fmt.Sprintf("called with unknown resourceName %q", resourceName))
+ }
+}
+
+// TODO: Remove nolint:dupl when switching to liquid-nova
+func (c PartialCapacity) IntoCapacityData(resourceName string, maxRootDiskSize float64, subcapacities []liquid.Subcapacity) liquid.AZResourceCapacityReport { //nolint:dupl
+ switch resourceName {
+ case "cores":
+ return liquid.AZResourceCapacityReport{
+ Capacity: c.VCPUs.Capacity,
+ Usage: &c.VCPUs.Usage,
+ Subcapacities: subcapacities,
+ }
+ case "ram":
+ return liquid.AZResourceCapacityReport{
+ Capacity: c.MemoryMB.Capacity,
+ Usage: &c.MemoryMB.Usage,
+ Subcapacities: subcapacities,
+ }
+ case "instances":
+ amount := 10000 * uint64(len(c.MatchingAggregates))
+ if maxRootDiskSize != 0 {
+ maxAmount := uint64(float64(c.LocalGB.Capacity) / maxRootDiskSize)
+ if amount > maxAmount {
+ amount = maxAmount
+ }
+ }
+ return liquid.AZResourceCapacityReport{
+ Capacity: amount,
+ Usage: &c.RunningVMs,
+ Subcapacities: subcapacities,
+ }
+ default:
+ panic(fmt.Sprintf("called with unknown resourceName %q", resourceName))
+ }
+}
+
+// PartialCapacityMetric appears in type PartialCapacity.
+type PartialCapacityMetric struct {
+ Capacity uint64
+ Usage uint64
+}
+
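+// CappedToUsage caps capacity at the current usage level: for example, a
+// metric with Capacity=100 and Usage=37 becomes Capacity=37, Usage=37. This
+// is applied to shadowed hypervisors, whose capacity only counts insofar as
+// it is already in use.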
+func (m PartialCapacityMetric) CappedToUsage() PartialCapacityMetric {
+ return PartialCapacityMetric{
+ Capacity: min(m.Capacity, m.Usage),
+ Usage: m.Usage,
+ }
+}
+
+// ScanCapacity implements the liquidapi.Logic interface.
+func (l *Logic) ScanCapacity(ctx context.Context, req liquid.ServiceCapacityRequest, serviceInfo liquid.ServiceInfo) (liquid.ServiceCapacityReport, error) {
+ // enumerate matching flavors, divide into split and pooled flavors;
+ // ("split flavors" are those with separate instance quota, as opposed to
+ // "pooled flavors" that share a common pool of CPU/instances/RAM capacity)
+ //
+ // also, for the pooled instances capacity, we need to know the max root disk size on public pooled flavors
+ var (
+ splitFlavors []flavors.Flavor
+ maxRootDiskSize = uint64(0)
+ )
+ pooledExtraSpecs := make(map[string]string)
+ err := l.FlavorSelection.ForeachFlavor(ctx, l.NovaV2, func(f flavors.Flavor) error {
+ switch {
+ case IsIronicFlavor(f):
+ // ignore Ironic flavors
+ case IsSplitFlavor(f):
+ splitFlavors = append(splitFlavors, f)
+ case f.IsPublic:
+ // require that all pooled flavors agree on the same trait-match extra specs
+ for spec, val := range f.ExtraSpecs {
+ if !strings.HasPrefix(spec, "trait:") || slices.Contains(l.IgnoreTraits, spec) {
+ continue
+ }
+ if pooledVal, exists := pooledExtraSpecs[spec]; !exists {
+ pooledExtraSpecs[spec] = val
+ } else if val != pooledVal {
+ return fmt.Errorf("conflict: pooled flavors both require extra spec %s values %s and %s", spec, val, pooledVal)
+ }
+ }
+ // only public flavors contribute to the `maxRootDiskSize` calculation (in
+ // the wild, we have seen non-public legacy flavors with wildly large
+ // disk sizes that would throw off all estimates derived from this number)
+ maxRootDiskSize = max(maxRootDiskSize, liquids.AtLeastZero(f.Disk))
+ }
+ return nil
+ })
+ if err != nil {
+ return liquid.ServiceCapacityReport{}, err
+ }
+ if l.PooledCoresResourceName != "" && maxRootDiskSize == 0 {
+ return liquid.ServiceCapacityReport{}, errors.New("pooled capacity requested, but there are no matching flavors")
+ }
+ logg.Debug("max root disk size = %d GiB", maxRootDiskSize)
+
+ // collect all relevant resource demands
+ coresDemand := req.DemandByResource[l.PooledCoresResourceName]
+ instancesDemand := req.DemandByResource[l.PooledInstancesResourceName]
+ ramDemand := req.DemandByResource[l.PooledRAMResourceName]
+
+ if l.PooledCoresResourceName == "" {
+ coresDemand.OvercommitFactor = 1
+ }
+ logg.Debug("pooled cores demand: %#v (overcommit factor = %g)", coresDemand.PerAZ, coresDemand.OvercommitFactor)
+ logg.Debug("pooled instances demand: %#v", instancesDemand.PerAZ)
+ logg.Debug("pooled RAM demand: %#v", ramDemand.PerAZ)
+
+ demandByFlavorName := make(map[string]liquid.ResourceDemand)
+ for _, f := range splitFlavors {
+ resourceName := ResourceNameForFlavor(f.Name)
+ demand := req.DemandByResource[resourceName]
+ if demand.OvercommitFactor != 1 && demand.OvercommitFactor != 0 {
+ return liquid.ServiceCapacityReport{}, fmt.Errorf("overcommit on compute/%s is not supported", resourceName)
+ }
+ demandByFlavorName[f.Name] = demand
+ }
+ logg.Debug("binpackable flavors: %#v", splitFlavors)
+ logg.Debug("demand for binpackable flavors: %#v", demandByFlavorName)
+
+ // enumerate matching hypervisors, prepare data structures for binpacking
+ hypervisorsByAZ := make(map[liquid.AvailabilityZone]BinpackHypervisors)
+ shadowedHypervisorsByAZ := make(map[liquid.AvailabilityZone][]MatchingHypervisor)
+ isShadowedHVHostname := make(map[string]bool)
+ err = l.HypervisorSelection.ForeachHypervisor(ctx, l.NovaV2, l.PlacementV1, func(h MatchingHypervisor) error {
+ // ignore HVs that are not associated with an aggregate and AZ
+ if !h.CheckTopology() {
+ return nil
+ }
+
+ if h.ShadowedByTrait == "" {
+ bh, err := PrepareHypervisorForBinpacking(h)
+ if err != nil {
+ return err
+ }
+ hypervisorsByAZ[h.AvailabilityZone] = append(hypervisorsByAZ[h.AvailabilityZone], bh)
+
+ hc := h.PartialCapacity()
+ logg.Debug("%s in %s reports %s capacity, %s used, %d nodes, %s max unit", h.Hypervisor.Description(), h.AvailabilityZone,
+ BinpackVector[uint64]{VCPUs: hc.VCPUs.Capacity, MemoryMB: hc.MemoryMB.Capacity, LocalGB: hc.LocalGB.Capacity},
+ BinpackVector[uint64]{VCPUs: hc.VCPUs.Usage, MemoryMB: hc.MemoryMB.Usage, LocalGB: hc.LocalGB.Usage},
+ len(bh.Nodes), bh.Nodes[0].Capacity,
+ )
+ } else {
+ shadowedHypervisorsByAZ[h.AvailabilityZone] = append(shadowedHypervisorsByAZ[h.AvailabilityZone], h)
+ isShadowedHVHostname[h.Hypervisor.HypervisorHostname] = true
+ logg.Debug("%s in %s is shadowed by trait %s", h.Hypervisor.Description(), h.AvailabilityZone, h.ShadowedByTrait)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return liquid.ServiceCapacityReport{}, err
+ }
+
+ // during binpacking, place instances of large flavors first to achieve optimal results
+ slices.SortFunc(splitFlavors, func(lhs, rhs flavors.Flavor) int {
+ //NOTE: this returns `rhs-lhs` instead of `lhs-rhs` to achieve descending order
+ if lhs.VCPUs != rhs.VCPUs {
+ return rhs.VCPUs - lhs.VCPUs
+ }
+ if lhs.RAM != rhs.RAM {
+ return rhs.RAM - lhs.RAM
+ }
+ return rhs.Disk - lhs.Disk
+ })
+
+ // if Nova can tell us where existing instances are running, we prefer this
+ // information since it will make our simulation more accurate
+ instancesPlacedOnShadowedHypervisors := make(map[string]map[liquid.AvailabilityZone]uint64) // first key is flavor name
+ bb := l.BinpackBehavior
+ for _, flavor := range splitFlavors {
+ shadowedForThisFlavor := make(map[liquid.AvailabilityZone]uint64)
+
+ // list all servers for this flavor, parsing only placement information from the result
+ listOpts := servers.ListOpts{
+ Flavor: flavor.ID,
+ AllTenants: true,
+ }
+ allPages, err := servers.List(l.NovaV2, listOpts).AllPages(ctx)
+ if err != nil {
+ return liquid.ServiceCapacityReport{}, fmt.Errorf("while listing active instances for flavor %s: %w", flavor.Name, err)
+ }
+ var instances []struct {
+ ID string `json:"id"`
+ AZ liquid.AvailabilityZone `json:"OS-EXT-AZ:availability_zone"`
+ HypervisorHostname string `json:"OS-EXT-SRV-ATTR:hypervisor_hostname"`
+ }
+ err = servers.ExtractServersInto(allPages, &instances)
+ if err != nil {
+ return liquid.ServiceCapacityReport{}, fmt.Errorf("while listing active instances for flavor %s: %w", flavor.Name, err)
+ }
+
+ for _, instance := range instances {
+ az := instance.AZ
+ if !slices.Contains(req.AllAZs, az) {
+ az = liquid.AvailabilityZoneUnknown
+ }
+
+ // If we are absolutely sure that this instance is placed on a shadowed hypervisor,
+ // we remember this and have the final capacity take those into account without
+ // including them in the binpacking simulation.
+ if isShadowedHVHostname[instance.HypervisorHostname] {
+ shadowedForThisFlavor[az]++
+ }
+
+ // If the instance is placed on a known hypervisor, place it right now.
+ // The number of instances thus placed will be skipped below to avoid double counting.
+ for _, hv := range hypervisorsByAZ[az] {
+ if hv.Match.Hypervisor.HypervisorHostname == instance.HypervisorHostname {
+ var zero BinpackVector[uint64]
+ placed := BinpackHypervisors{hv}.PlaceOneInstance(flavor, "USED", coresDemand.OvercommitFactor, zero, bb)
+ if !placed {
+ logg.Debug("could not simulate placement of known instance %s on %s", instance.ID, hv.Match.Hypervisor.Description())
+ }
+ break
+ }
+ }
+ }
+
+ if len(shadowedForThisFlavor) > 0 {
+ instancesPlacedOnShadowedHypervisors[flavor.Name] = shadowedForThisFlavor
+ }
+ }
+ logg.Debug("instances for split flavors placed on shadowed hypervisors: %v", instancesPlacedOnShadowedHypervisors)
+
+ // foreach AZ, place demanded split instances in order of priority, unless
+ // blocked by pooled instances of equal or higher priority
+ for az, hypervisors := range hypervisorsByAZ {
+ canPlaceFlavor := make(map[string]bool)
+ for _, flavor := range splitFlavors {
+ canPlaceFlavor[flavor.Name] = true
+ }
+
+ // phase 1: block existing usage
+ blockedCapacity := BinpackVector[uint64]{
+ VCPUs: coresDemand.OvercommitFactor.ApplyInReverseTo(coresDemand.PerAZ[az].Usage),
+ MemoryMB: ramDemand.PerAZ[az].Usage,
+ LocalGB: instancesDemand.PerAZ[az].Usage * maxRootDiskSize,
+ }
+ logg.Debug("[%s] blockedCapacity in phase 1: %s", az, blockedCapacity.String())
+ for _, flavor := range splitFlavors {
+ // skip instances that have already been placed in the simulation, as well as
+ // instances that run on hypervisors that do not participate in the binpacking simulation
+ placedUsage := hypervisors.PlacementCountForFlavor(flavor.Name)
+ shadowedUsage := instancesPlacedOnShadowedHypervisors[flavor.Name][az]
+ unplacedUsage := saturatingSub(demandByFlavorName[flavor.Name].PerAZ[az].Usage, placedUsage+shadowedUsage)
+ if !hypervisors.PlaceSeveralInstances(flavor, "used", coresDemand.OvercommitFactor, blockedCapacity, bb, unplacedUsage) {
+ canPlaceFlavor[flavor.Name] = false
+ }
+ }
+
+ // phase 2: block confirmed, but unused commitments
+ blockedCapacity.VCPUs += coresDemand.OvercommitFactor.ApplyInReverseTo(coresDemand.PerAZ[az].UnusedCommitments)
+ blockedCapacity.MemoryMB += ramDemand.PerAZ[az].UnusedCommitments
+ blockedCapacity.LocalGB += instancesDemand.PerAZ[az].UnusedCommitments * maxRootDiskSize
+ logg.Debug("[%s] blockedCapacity in phase 2: %s", az, blockedCapacity.String())
+ for _, flavor := range splitFlavors {
+ if !hypervisors.PlaceSeveralInstances(flavor, "committed", coresDemand.OvercommitFactor, blockedCapacity, bb, demandByFlavorName[flavor.Name].PerAZ[az].UnusedCommitments) {
+ canPlaceFlavor[flavor.Name] = false
+ }
+ }
+
+ // phase 3: block pending commitments
+ blockedCapacity.VCPUs += coresDemand.OvercommitFactor.ApplyInReverseTo(coresDemand.PerAZ[az].PendingCommitments)
+ blockedCapacity.MemoryMB += ramDemand.PerAZ[az].PendingCommitments
+ blockedCapacity.LocalGB += instancesDemand.PerAZ[az].PendingCommitments * maxRootDiskSize
+ logg.Debug("[%s] blockedCapacity in phase 3: %s", az, blockedCapacity.String())
+ for _, flavor := range splitFlavors {
+ if !hypervisors.PlaceSeveralInstances(flavor, "pending", coresDemand.OvercommitFactor, blockedCapacity, bb, demandByFlavorName[flavor.Name].PerAZ[az].PendingCommitments) {
+ canPlaceFlavor[flavor.Name] = false
+ }
+ }
+
+ // check how many instances we could place until now
+ initiallyPlacedInstances := make(map[string]float64)
+ sumInitiallyPlacedInstances := uint64(0)
+ totalPlacedInstances := make(map[string]float64) // these two will diverge in the final round of placements
+ var splitFlavorsUsage BinpackVector[uint64]
+ for _, flavor := range splitFlavors {
+ count := hypervisors.PlacementCountForFlavor(flavor.Name)
+ initiallyPlacedInstances[flavor.Name] = max(float64(count), 0.1)
+ sumInitiallyPlacedInstances += count
+ totalPlacedInstances[flavor.Name] = float64(count)
+ // The max(..., 0.1) is explained below.
+
+ splitFlavorsUsage.VCPUs += coresDemand.OvercommitFactor.ApplyInReverseTo(count * liquids.AtLeastZero(flavor.VCPUs))
+ splitFlavorsUsage.MemoryMB += count * liquids.AtLeastZero(flavor.RAM)
+ splitFlavorsUsage.LocalGB += count * liquids.AtLeastZero(flavor.Disk)
+ }
+
+ // for the upcoming final fill, we want to block capacity in such a way that
+ // the reported capacity is fairly divided between pooled and split flavors,
+ // in a way that matches the existing usage distribution, that is:
+ //
+ // capacity blocked for pooled flavors = capacity * (pooled usage / total usage)
+ //                                                    ------------
+ //                                                    ^ this is in blockedCapacity
+ //
+ totalUsageUntilNow := blockedCapacity.Add(splitFlavorsUsage)
+ if !totalUsageUntilNow.IsAnyZero() {
+ // we can only do this if .Div() does not cause a divide-by-zero, otherwise we continue with blockedCapacity = 0
+ blockedCapacity = hypervisors.TotalCapacity().AsFloat().Mul(blockedCapacity.Div(totalUsageUntilNow)).AsUint()
+ }
+ logg.Debug("[%s] usage by split flavors after phase 3: %s", az, splitFlavorsUsage.String())
+ logg.Debug("[%s] blockedCapacity in final fill: %s (totalCapacity = %s)", az, blockedCapacity.String(), hypervisors.TotalCapacity().String())
+
+ // fill up with padding in a fair way as long as there is space left,
+ // except if there is pooling and we don't have any demand at all on the split flavors
+ // (in order to avoid weird numerical edge cases in the `blockedCapacity` calculation above)
+ fillUp := l.PooledCoresResourceName == "" || sumInitiallyPlacedInstances > 0
+ // This uses the Sainte-Laguë method designed for allocation of parliament
+ // seats. In this case, the parties are the flavors, the votes are what we
+ // allocated based on demand (`initiallyPlacedInstances`), and the seats are
+ // the placements (`totalPlacedInstances`).
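+ // For example (illustrative): if flavor A was initially placed 4 times and
+ // flavor B once, the scores start at 4/9 vs. 1/3, so A receives the next
+ // two padding placements before B gets one; over many rounds the placements
+ // converge towards the same 4:1 ratio as the initial, demand-based ones.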
+ for fillUp {
+ var (
+ bestFlavor *flavors.Flavor
+ bestScore = -1.0
+ )
+ for _, flavor := range splitFlavors {
+ if !canPlaceFlavor[flavor.Name] {
+ continue
+ }
+ score := (initiallyPlacedInstances[flavor.Name]) / (2*totalPlacedInstances[flavor.Name] + 1)
+ // ^ This is why we adjusted all initiallyPlacedInstances[flavor.Name] = 0 to 0.1
+ // above. If the numerator of this fraction is 0 for multiple flavors, the first
+ // (biggest) flavor always wins unfairly. By adjusting it slightly away from zero,
+ // the scoring is more fair and stable.
+ if score > bestScore {
+ bestScore = score
+ flavor := flavor
+ bestFlavor = &flavor
+ }
+ }
+ if bestFlavor == nil {
+ // no flavor left that can be placed -> stop
+ break
+ } else {
+ if hypervisors.PlaceOneInstance(*bestFlavor, "padding", coresDemand.OvercommitFactor, blockedCapacity, bb) {
+ totalPlacedInstances[bestFlavor.Name]++
+ } else {
+ canPlaceFlavor[bestFlavor.Name] = false
+ }
+ }
+ }
+ } ////////// end of placement
+
+ // debug visualization of the binpack placement result
+ if logg.ShowDebug {
+ for az, hypervisors := range hypervisorsByAZ {
+ for _, hypervisor := range hypervisors {
+ hypervisor.RenderDebugView(az)
+ }
+ }
+ }
+
+ // compile result for pooled resources
+ capacities := make(map[liquid.ResourceName]*liquid.ResourceCapacityReport, len(splitFlavors)+3)
+ if l.PooledCoresResourceName != "" {
+ capacities[l.PooledCoresResourceName] = &liquid.ResourceCapacityReport{
+ PerAZ: make(map[liquid.AvailabilityZone]*liquid.AZResourceCapacityReport),
+ }
+ capacities[l.PooledInstancesResourceName] = &liquid.ResourceCapacityReport{
+ PerAZ: make(map[liquid.AvailabilityZone]*liquid.AZResourceCapacityReport),
+ }
+ capacities[l.PooledRAMResourceName] = &liquid.ResourceCapacityReport{
+ PerAZ: make(map[liquid.AvailabilityZone]*liquid.AZResourceCapacityReport),
+ }
+
+ for az, hypervisors := range hypervisorsByAZ {
+ var (
+ azCapacity PartialCapacity
+ builder PooledSubcapacityBuilder
+ )
+ for _, h := range hypervisors {
+ azCapacity.Add(h.Match.PartialCapacity())
+ if l.WithSubcapacities {
+ err = builder.AddHypervisor(h.Match, float64(maxRootDiskSize))
+ if err != nil {
+ return liquid.ServiceCapacityReport{}, fmt.Errorf("could not add hypervisor as subcapacity: %w", err)
+ }
+ }
+ }
+ for _, h := range shadowedHypervisorsByAZ[az] {
+ azCapacity.Add(h.PartialCapacity().CappedToUsage())
+ if l.WithSubcapacities {
+ err = builder.AddHypervisor(h, float64(maxRootDiskSize))
+ if err != nil {
+ return liquid.ServiceCapacityReport{}, fmt.Errorf("could not add hypervisor as subcapacity: %w", err)
+ }
+ }
+ }
+
+ capacities[l.PooledCoresResourceName].PerAZ[az] = pointerTo(azCapacity.IntoCapacityData("cores", float64(maxRootDiskSize), builder.CoresSubcapacities))
+ capacities[l.PooledInstancesResourceName].PerAZ[az] = pointerTo(azCapacity.IntoCapacityData("instances", float64(maxRootDiskSize), builder.InstancesSubcapacities))
+ capacities[l.PooledRAMResourceName].PerAZ[az] = pointerTo(azCapacity.IntoCapacityData("ram", float64(maxRootDiskSize), builder.RAMSubcapacities))
+ for _, flavor := range splitFlavors {
+ count := hypervisors.PlacementCountForFlavor(flavor.Name)
+ capacities[l.PooledCoresResourceName].PerAZ[az].Capacity -= coresDemand.OvercommitFactor.ApplyInReverseTo(count * liquids.AtLeastZero(flavor.VCPUs))
+ capacities[l.PooledInstancesResourceName].PerAZ[az].Capacity -= count // TODO: not accurate when uint64(flavor.Disk) != maxRootDiskSize
+ capacities[l.PooledRAMResourceName].PerAZ[az].Capacity -= count * liquids.AtLeastZero(flavor.RAM)
+ }
+ }
+ }
+
+ // compile result for split flavors
+ slices.SortFunc(splitFlavors, func(lhs, rhs flavors.Flavor) int {
+ return strings.Compare(lhs.Name, rhs.Name)
+ })
+ for idx, flavor := range splitFlavors {
+ resourceName := ResourceNameForFlavor(flavor.Name)
+ capacities[resourceName] = &liquid.ResourceCapacityReport{
+ PerAZ: make(map[liquid.AvailabilityZone]*liquid.AZResourceCapacityReport),
+ }
+
+ for az, hypervisors := range hypervisorsByAZ {
+ // if we could not report subcapacities on pooled resources, report them on
+ // the first flavor in alphabetic order (this is why we just sorted them)
+ var builder SplitFlavorSubcapacityBuilder
+ if l.WithSubcapacities && l.PooledCoresResourceName == "" && idx == 0 {
+ for _, h := range hypervisors {
+ err = builder.AddHypervisor(h.Match)
+ if err != nil {
+ return liquid.ServiceCapacityReport{}, fmt.Errorf("could not add hypervisor as subcapacity: %w", err)
+ }
+ }
+ }
+
+ capacities[resourceName].PerAZ[az] = &liquid.AZResourceCapacityReport{
+ Capacity: hypervisors.PlacementCountForFlavor(flavor.Name),
+ Subcapacities: builder.Subcapacities,
+ }
+ }
+
+ // if shadowed hypervisors are still carrying instances of this flavor,
+ // increase the capacity accordingly to more accurately represent the
+ // free capacity on the unshadowed hypervisors
+ for az, shadowedCount := range instancesPlacedOnShadowedHypervisors[flavor.Name] {
+ if capacities[resourceName].PerAZ[az] == nil {
+ capacities[resourceName].PerAZ[az] = &liquid.AZResourceCapacityReport{
+ Capacity: shadowedCount,
+ }
+ } else {
+ capacities[resourceName].PerAZ[az].Capacity += shadowedCount
+ }
+ }
+ }
+
+ return liquid.ServiceCapacityReport{
+ InfoVersion: serviceInfo.Version,
+ Resources: capacities,
+ }, nil
+}
+
+func pointerTo[T any](value T) *T {
+ return &value
+}
+
+// Like `lhs - rhs`, but never underflows below 0.
+func saturatingSub(lhs, rhs uint64) uint64 {
+ if lhs < rhs {
+ return 0
+ }
+ return lhs - rhs
+}
diff --git a/internal/plugins/nova/constants.go b/internal/liquids/nova/constants.go
similarity index 100%
rename from internal/plugins/nova/constants.go
rename to internal/liquids/nova/constants.go
diff --git a/internal/plugins/nova/flavor_selection.go b/internal/liquids/nova/flavor_selection.go
similarity index 97%
rename from internal/plugins/nova/flavor_selection.go
rename to internal/liquids/nova/flavor_selection.go
index 1b90f3a48..e917cde90 100644
--- a/internal/plugins/nova/flavor_selection.go
+++ b/internal/liquids/nova/flavor_selection.go
@@ -30,6 +30,7 @@ import (
"github.com/sapcc/go-api-declarations/liquid"
)
+// TODO: Remove yaml tags when switching to liquid-nova
// FlavorSelection describes a set of public flavors.
//
// This is used for matching flavors that we enumerate via the flavor API
@@ -37,9 +38,9 @@ import (
// name, type FlavorNameSelection is used.
type FlavorSelection struct {
// Only match flavors that have all of these extra specs.
- RequiredExtraSpecs map[string]string `yaml:"required_extra_specs"`
+ RequiredExtraSpecs map[string]string `yaml:"required_extra_specs" json:"required_extra_specs"`
// Exclude flavors that have any of these extra specs.
- ExcludedExtraSpecs map[string]string `yaml:"excluded_extra_specs"`
+ ExcludedExtraSpecs map[string]string `yaml:"excluded_extra_specs" json:"excluded_extra_specs"`
}
func (s FlavorSelection) matchesExtraSpecs(specs map[string]string) bool {
diff --git a/internal/plugins/nova/gophercloud_fixes.go b/internal/liquids/nova/gophercloud_fixes.go
similarity index 100%
rename from internal/plugins/nova/gophercloud_fixes.go
rename to internal/liquids/nova/gophercloud_fixes.go
diff --git a/internal/plugins/nova/hypervisor_selection.go b/internal/liquids/nova/hypervisor_selection.go
similarity index 97%
rename from internal/plugins/nova/hypervisor_selection.go
rename to internal/liquids/nova/hypervisor_selection.go
index a4318e62c..8bd574f01 100644
--- a/internal/plugins/nova/hypervisor_selection.go
+++ b/internal/liquids/nova/hypervisor_selection.go
@@ -36,19 +36,20 @@ import (
"github.com/sapcc/go-bits/regexpext"
)
+// TODO: Remove yaml tags when switching to liquid-nova
// HypervisorSelection describes a set of hypervisors.
type HypervisorSelection struct {
// Only match hypervisors with a hypervisor_type attribute matching this pattern.
- HypervisorTypeRx regexpext.PlainRegexp `yaml:"hypervisor_type_pattern"`
+ HypervisorTypeRx regexpext.PlainRegexp `yaml:"hypervisor_type_pattern" json:"hypervisor_type_pattern"`
// Only match hypervisors that have any of these traits.
// Trait names can include a `!` prefix to invert the match.
- RequiredTraits []string `yaml:"required_traits"`
+ RequiredTraits []string `yaml:"required_traits" json:"required_traits"`
// Set the MatchingHypervisor.ShadowedByTrait field on hypervisors that have any of these traits.
// Trait names can include a `!` prefix to invert the match.
- ShadowingTraits []string `yaml:"shadowing_traits"`
+ ShadowingTraits []string `yaml:"shadowing_traits" json:"shadowing_traits"`
// Only match hypervisors that reside in an aggregate matching this pattern.
// If a hypervisor resides in multiple matching aggregates, an error is raised.
- AggregateNameRx regexpext.PlainRegexp `yaml:"aggregate_name_pattern"`
+ AggregateNameRx regexpext.PlainRegexp `yaml:"aggregate_name_pattern" json:"aggregate_name_pattern"`
}
// ForeachHypervisor lists all Nova hypervisors matching this
diff --git a/internal/plugins/nova/hypervisor_subcapacity.go b/internal/liquids/nova/hypervisor_subcapacity.go
similarity index 55%
rename from internal/plugins/nova/hypervisor_subcapacity.go
rename to internal/liquids/nova/hypervisor_subcapacity.go
index 26fb32510..72265c019 100644
--- a/internal/plugins/nova/hypervisor_subcapacity.go
+++ b/internal/liquids/nova/hypervisor_subcapacity.go
@@ -20,7 +20,11 @@
package nova
import (
+ "encoding/json"
+ "fmt"
+
"github.com/sapcc/go-api-declarations/limes"
+ "github.com/sapcc/go-api-declarations/liquid"
)
// Subcapacity is the structure for subcapacities reported by the "nova" capacity plugin.
@@ -39,8 +43,9 @@ type Subcapacity struct {
Traits []string `json:"traits"`
}
+// TODO: Remove when switching to liquid-nova
-// PooledSubcapacityBuilder is used to build subcapacity lists for pooled resources.
+// DeprecatedPooledSubcapacityBuilder is used to build subcapacity lists for pooled resources.
-type PooledSubcapacityBuilder struct {
+type DeprecatedPooledSubcapacityBuilder struct {
// These are actually []Subcapacity, but we store them as []any because
// that's what goes into type core.CapacityData in the end.
CoresSubcapacities []any
@@ -48,7 +53,22 @@ type PooledSubcapacityBuilder struct {
RAMSubcapacities []any
}
-func (b *PooledSubcapacityBuilder) AddHypervisor(h MatchingHypervisor, maxRootDiskSize float64) {
+// PooledSubcapacityBuilder is used to build subcapacity lists for pooled resources.
+type PooledSubcapacityBuilder struct {
+ CoresSubcapacities []liquid.Subcapacity
+ InstancesSubcapacities []liquid.Subcapacity
+ RAMSubcapacities []liquid.Subcapacity
+}
+
+type SubcapacityAttributes struct {
+ AggregateName string `json:"aggregate_name"`
+ Traits []string `json:"traits"`
+}
+
+// TODO: Remove when switching to liquid-nova
+func (b *DeprecatedPooledSubcapacityBuilder) AddHypervisor(h MatchingHypervisor, maxRootDiskSize float64) {
pc := h.PartialCapacity()
hvCoresCapa := pc.IntoCapacityData("cores", maxRootDiskSize, nil)
@@ -80,13 +100,51 @@ func (b *PooledSubcapacityBuilder) AddHypervisor(h MatchingHypervisor, maxRootDi
})
}
+func (b *PooledSubcapacityBuilder) AddHypervisor(h MatchingHypervisor, maxRootDiskSize float64) error {
+ pc := h.PartialCapacity()
+
+ attrs := SubcapacityAttributes{
+ AggregateName: h.AggregateName,
+ Traits: h.Traits,
+ }
+ buf, err := json.Marshal(attrs)
+ if err != nil {
+ return fmt.Errorf("while serializing Subcapacity Attributes: %w", err)
+ }
+
+ hvCoresCapa := pc.IntoCapacityData("cores", maxRootDiskSize, nil)
+ b.CoresSubcapacities = append(b.CoresSubcapacities, liquid.Subcapacity{
+ Name: h.Hypervisor.Service.Host,
+ Capacity: hvCoresCapa.Capacity,
+ Usage: hvCoresCapa.Usage,
+ Attributes: json.RawMessage(buf),
+ })
+ hvInstancesCapa := pc.IntoCapacityData("instances", maxRootDiskSize, nil)
+ b.InstancesSubcapacities = append(b.InstancesSubcapacities, liquid.Subcapacity{
+ Name: h.Hypervisor.Service.Host,
+ Capacity: hvInstancesCapa.Capacity,
+ Usage: hvInstancesCapa.Usage,
+ Attributes: json.RawMessage(buf),
+ })
+ hvRAMCapa := pc.IntoCapacityData("ram", maxRootDiskSize, nil)
+ b.RAMSubcapacities = append(b.RAMSubcapacities, liquid.Subcapacity{
+ Name: h.Hypervisor.Service.Host,
+ Capacity: hvRAMCapa.Capacity,
+ Usage: hvRAMCapa.Usage,
+ Attributes: json.RawMessage(buf),
+ })
+
+ return nil
+}
+
+// TODO: Remove when switching to liquid-nova
-// PooledSubcapacityBuilder is used to build subcapacity lists for split flavors.
+// DeprecatedSplitFlavorSubcapacityBuilder is used to build subcapacity lists for split flavors.
// These subcapacities are reported on the first flavor in alphabetic order.
-type SplitFlavorSubcapacityBuilder struct {
+type DeprecatedSplitFlavorSubcapacityBuilder struct {
Subcapacities []any
}
-func (b *SplitFlavorSubcapacityBuilder) AddHypervisor(h MatchingHypervisor) {
+func (b *DeprecatedSplitFlavorSubcapacityBuilder) AddHypervisor(h MatchingHypervisor) {
pc := h.PartialCapacity()
b.Subcapacities = append(b.Subcapacities, Subcapacity{
ServiceHost: h.Hypervisor.Service.Host,
@@ -105,3 +163,45 @@ func (b *SplitFlavorSubcapacityBuilder) AddHypervisor(h MatchingHypervisor) {
Traits: h.Traits,
})
}
+
+// SplitFlavorSubcapacityBuilder is used to build subcapacity lists for split flavors.
+// These subcapacities are reported on the first flavor in alphabetic order.
+type SplitFlavorSubcapacityBuilder struct {
+ Subcapacities []liquid.Subcapacity
+}
+
+type SplitFlavorSubcapacityAttributes struct {
+ AggregateName string `json:"aggregate_name"`
+ CapacityVector *BinpackVector[uint64] `json:"capacity_vector,omitempty"`
+ UsageVector *BinpackVector[uint64] `json:"usage_vector,omitempty"`
+ Traits []string `json:"traits"`
+}
+
+func (b *SplitFlavorSubcapacityBuilder) AddHypervisor(h MatchingHypervisor) error {
+ pc := h.PartialCapacity()
+ attrs := SplitFlavorSubcapacityAttributes{
+ AggregateName: h.AggregateName,
+ CapacityVector: &BinpackVector[uint64]{
+ VCPUs: pc.VCPUs.Capacity,
+ MemoryMB: pc.MemoryMB.Capacity,
+ LocalGB: pc.LocalGB.Capacity,
+ },
+ UsageVector: &BinpackVector[uint64]{
+ VCPUs: pc.VCPUs.Usage,
+ MemoryMB: pc.MemoryMB.Usage,
+ LocalGB: pc.LocalGB.Usage,
+ },
+ Traits: h.Traits,
+ }
+ buf, err := json.Marshal(attrs)
+ if err != nil {
+ return fmt.Errorf("while serializing Subcapacity Attributes: %w", err)
+ }
+ b.Subcapacities = append(b.Subcapacities, liquid.Subcapacity{
+ Name: h.Hypervisor.Service.Host,
+ Capacity: 0,
+ Attributes: json.RawMessage(buf),
+ })
+
+ return nil
+}
diff --git a/internal/liquids/nova/liquid.go b/internal/liquids/nova/liquid.go
new file mode 100644
index 000000000..10453fa63
--- /dev/null
+++ b/internal/liquids/nova/liquid.go
@@ -0,0 +1,222 @@
+/*******************************************************************************
+*
+* Copyright 2024 SAP SE
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You should have received a copy of the License along with this
+* program. If not, you may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+*******************************************************************************/
+
+package nova
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "slices"
+ "strings"
+ "time"
+
+ "github.com/gophercloud/gophercloud/v2"
+ "github.com/gophercloud/gophercloud/v2/openstack"
+ "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/flavors"
+ "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/quotasets"
+ "github.com/sapcc/go-api-declarations/liquid"
+)
+
+type Logic struct {
+ // configuration
+ HypervisorSelection HypervisorSelection `json:"hypervisor_selection"`
+ FlavorSelection FlavorSelection `json:"flavor_selection"`
+ PooledCoresResourceName liquid.ResourceName `json:"pooled_cores_resource"`
+ PooledInstancesResourceName liquid.ResourceName `json:"pooled_instances_resource"`
+ PooledRAMResourceName liquid.ResourceName `json:"pooled_ram_resource"`
+ WithSubresources bool `json:"with_subresources"`
+ WithSubcapacities bool `json:"with_subcapacities"`
+ BinpackBehavior BinpackBehavior `json:"binpack_behavior"`
+ IgnoreTraits []string `json:"ignore_traits"`
+ // connections
+ NovaV2 *gophercloud.ServiceClient `json:"-"`
+ PlacementV1 *gophercloud.ServiceClient `json:"-"`
+ OSTypeProber *OSTypeProber `json:"-"`
+ ServerGroupProber *ServerGroupProber `json:"-"`
+ // computed state
+ ignoredFlavorNames []string `json:"-"`
+ hasPooledResource map[string]map[liquid.ResourceName]bool `json:"-"`
+ hwVersionResources []liquid.ResourceName `json:"-"`
+}
+
+// Init implements the liquidapi.Logic interface.
+func (l *Logic) Init(ctx context.Context, provider *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (err error) {
+ l.NovaV2, err = openstack.NewComputeV2(provider, eo)
+ if err != nil {
+ return err
+ }
+ l.NovaV2.Microversion = "2.61" // to include extra specs in flavors.ListDetail()
+
+ l.PlacementV1, err = openstack.NewPlacementV1(provider, eo)
+ if err != nil {
+ return err
+ }
+ l.PlacementV1.Microversion = "1.6" // for traits endpoint
+
+ cinderV3, err := openstack.NewBlockStorageV3(provider, eo)
+ if err != nil {
+ return err
+ }
+
+ glanceV2, err := openstack.NewImageV2(provider, eo)
+ if err != nil {
+ return err
+ }
+ l.OSTypeProber = NewOSTypeProber(l.NovaV2, cinderV3, glanceV2)
+ l.ServerGroupProber = NewServerGroupProber(l.NovaV2)
+
+ // SAPCC extension: Nova may report quotas with this name pattern in its quota sets and quota class sets.
+ // If it does, instances with flavors that have the extra spec `quota:hw_version` set to the first match
+ // group of this regexp will count towards those quotas instead of the regular `cores/instances/ram` quotas.
+ //
+ // This initialization enumerates which such pooled resources exist.
+ defaultQuotaClassSet, err := getDefaultQuotaClassSet(ctx, l.NovaV2)
+ if err != nil {
+ return fmt.Errorf("while enumerating default quotas: %w", err)
+ }
+ l.hasPooledResource = make(map[string]map[liquid.ResourceName]bool)
+ hwVersionResourceRx := regexp.MustCompile(`^hw_version_(\S+)_(cores|instances|ram)$`)
+ for resourceName := range defaultQuotaClassSet {
+ match := hwVersionResourceRx.FindStringSubmatch(resourceName)
+ if match == nil {
+ continue
+ }
+ hwVersion, baseResourceName := match[1], liquid.ResourceName(match[2])
+
+ l.hwVersionResources = append(l.hwVersionResources, liquid.ResourceName(resourceName))
+
+ if l.hasPooledResource[hwVersion] == nil {
+ l.hasPooledResource[hwVersion] = make(map[liquid.ResourceName]bool)
+ }
+ l.hasPooledResource[hwVersion][baseResourceName] = true
+ }
+
+ return FlavorSelection{}.ForeachFlavor(ctx, l.NovaV2, func(f flavors.Flavor) error {
+ if IsIronicFlavor(f) {
+ l.ignoredFlavorNames = append(l.ignoredFlavorNames, f.Name)
+ }
+ return nil
+ })
+}
+
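+// getDefaultQuotaClassSet reads the default quota class set from
+// GET /os-quota-class-sets/default. The response body looks roughly like
+// this (illustrative values):
+//
+//	{"quota_class_set": {"id": "default", "cores": 20, "instances": 10, "ram": 51200, ...}}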
+func getDefaultQuotaClassSet(ctx context.Context, novaV2 *gophercloud.ServiceClient) (map[string]any, error) {
+ url := novaV2.ServiceURL("os-quota-class-sets", "default")
+ var result gophercloud.Result
+ _, err := novaV2.Get(ctx, url, &result.Body, nil) //nolint:bodyclose
+ if err != nil {
+ return nil, err
+ }
+
+ var body struct {
+ //NOTE: cannot use map[string]int64 here because this object contains the
+ // field "id": "default" (curse you, untyped JSON)
+ QuotaClassSet map[string]any `json:"quota_class_set"`
+ }
+ err = result.ExtractInto(&body)
+ return body.QuotaClassSet, err
+}
+
+// BuildServiceInfo implements the liquidapi.Logic interface.
+func (l *Logic) BuildServiceInfo(ctx context.Context) (liquid.ServiceInfo, error) {
+ resources := map[liquid.ResourceName]liquid.ResourceInfo{
+ "cores": {
+ Unit: liquid.UnitNone,
+ HasCapacity: true,
+ HasQuota: true,
+ NeedsResourceDemand: true,
+ },
+ "instances": {
+ Unit: liquid.UnitNone,
+ HasCapacity: true,
+ HasQuota: true,
+ NeedsResourceDemand: true,
+ },
+ "ram": {
+ Unit: liquid.UnitMebibytes,
+ HasCapacity: true,
+ HasQuota: true,
+ NeedsResourceDemand: true,
+ },
+ "server_groups": {
+ Unit: liquid.UnitNone,
+ HasQuota: true,
+ },
+ "server_group_members": {
+ Unit: liquid.UnitNone,
+ HasQuota: true,
+ },
+ }
+
+ err := FlavorSelection{}.ForeachFlavor(ctx, l.NovaV2, func(f flavors.Flavor) error {
+ if IsIronicFlavor(f) {
+ return nil
+ }
+ if IsSplitFlavor(f) {
+ resources[ResourceNameForFlavor(f.Name)] = liquid.ResourceInfo{
+ Unit: liquid.UnitNone,
+ HasCapacity: true,
+ HasQuota: true,
+ NeedsResourceDemand: true,
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return liquid.ServiceInfo{}, err
+ }
+
+ for _, resourceName := range l.hwVersionResources {
+ unit := liquid.UnitNone
+ if strings.HasSuffix(string(resourceName), "ram") {
+ unit = liquid.UnitMebibytes
+ }
+ resources[resourceName] = liquid.ResourceInfo{
+ Unit: unit,
+ HasQuota: true,
+ }
+ }
+
+ return liquid.ServiceInfo{
+ Version: time.Now().Unix(),
+ Resources: resources,
+ }, nil
+}
+
+// SetQuota implements the liquidapi.Logic interface.
+func (l *Logic) SetQuota(ctx context.Context, projectUUID string, req liquid.ServiceQuotaRequest, serviceInfo liquid.ServiceInfo) error {
+ opts := make(novaQuotaUpdateOpts, len(serviceInfo.Resources))
+ for resName := range serviceInfo.Resources {
+ opts[string(resName)] = req.Resources[resName].Quota
+ }
+ return quotasets.Update(ctx, l.NovaV2, projectUUID, opts).Err
+}
+
+func (l *Logic) IgnoreFlavor(flavorName string) bool {
+ return slices.Contains(l.ignoredFlavorNames, flavorName)
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// custom types for OpenStack APIs
+
+type novaQuotaUpdateOpts map[string]uint64
+
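+// ToComputeQuotaUpdateMap wraps the quota values in the request envelope that
+// the Nova quota-sets API expects, e.g. (illustrative values):
+//
+//	{"quota_set": {"cores": 20, "instances": 10, "ram": 51200}}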
+func (opts novaQuotaUpdateOpts) ToComputeQuotaUpdateMap() (map[string]any, error) {
+ return map[string]any{"quota_set": opts}, nil
+}
diff --git a/internal/plugins/nova/ostype_prober.go b/internal/liquids/nova/ostype_prober.go
similarity index 100%
rename from internal/plugins/nova/ostype_prober.go
rename to internal/liquids/nova/ostype_prober.go
diff --git a/internal/plugins/nova/server_group_prober.go b/internal/liquids/nova/server_group_prober.go
similarity index 100%
rename from internal/plugins/nova/server_group_prober.go
rename to internal/liquids/nova/server_group_prober.go
diff --git a/internal/liquids/nova/subresources.go b/internal/liquids/nova/subresources.go
new file mode 100644
index 000000000..a7780534e
--- /dev/null
+++ b/internal/liquids/nova/subresources.go
@@ -0,0 +1,131 @@
+/*******************************************************************************
+*
+* Copyright 2024 SAP SE
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You should have received a copy of the License along with this
+* program. If not, you may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+*******************************************************************************/
+
+package nova
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/gophercloud/gophercloud/v2"
+ "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers"
+ "github.com/gophercloud/gophercloud/v2/pagination"
+ "github.com/sapcc/go-api-declarations/liquid"
+)
+
+type SubresourceAttributes struct {
+ // base metadata
+ Status string `json:"status"`
+ Metadata map[string]string `json:"metadata"`
+ Tags []string `json:"tags"`
+ // placement information
+ AZ liquid.AvailabilityZone `json:"availability_zone"`
+ // information from flavor
+ FlavorName string `json:"flavor"`
+ VCPUs uint64 `json:"vcpu"`
+ MemoryMiB uint64 `json:"ram"`
+ DiskGiB uint64 `json:"disk"`
+ VideoMemoryMiB *uint64 `json:"video_ram,omitempty"`
+ HWVersion string `json:"-"` // this is only used for sorting the subresource into the right resource
+ // information from image
+ OSType string `json:"os_type"`
+}
+
+func (l *Logic) buildInstanceSubresource(ctx context.Context, instance servers.Server) (res liquid.SubresourceBuilder[SubresourceAttributes], err error) {
+ // copy base attributes
+ res.ID = instance.ID
+ res.Name = instance.Name
+
+ attrs := SubresourceAttributes{
+ Status: instance.Status,
+ AZ: liquid.AvailabilityZone(instance.AvailabilityZone),
+ Metadata: instance.Metadata,
+ }
+ if instance.Tags != nil {
+ attrs.Tags = *instance.Tags
+ }
+
+ // flavor data is given to us as a map[string]any, but we want something more structured
+ buf, err := json.Marshal(instance.Flavor)
+ if err != nil {
+ return res, fmt.Errorf("could not reserialize flavor data for instance %s: %w", instance.ID, err)
+ }
+ var flavorInfo FlavorInfo
+ err = json.Unmarshal(buf, &flavorInfo)
+ if err != nil {
+ return res, fmt.Errorf("could not parse flavor data for instance %s: %w", instance.ID, err)
+ }
+
+ // copy attributes from flavor data
+ attrs.FlavorName = flavorInfo.OriginalName
+ attrs.VCPUs = flavorInfo.VCPUs
+ attrs.MemoryMiB = flavorInfo.MemoryMiB
+ attrs.DiskGiB = flavorInfo.DiskGiB
+ if videoRAMStr, exists := flavorInfo.ExtraSpecs["hw_video:ram_max_mb"]; exists {
+ videoRAMVal, err := strconv.ParseUint(videoRAMStr, 10, 64)
+ if err == nil {
+ attrs.VideoMemoryMiB = &videoRAMVal
+ }
+ }
+ attrs.HWVersion = flavorInfo.ExtraSpecs["quota:hw_version"]
+
+ // calculate classifications based on image data
+ attrs.OSType = l.OSTypeProber.Get(ctx, instance)
+
+ res.Attributes = attrs
+ return res, nil
+}
+
+func (l *Logic) buildInstanceSubresources(ctx context.Context, projectUUID string) ([]liquid.SubresourceBuilder[SubresourceAttributes], error) {
+ opts := novaServerListOpts{
+ AllTenants: true,
+ TenantID: projectUUID,
+ }
+
+ var result []liquid.SubresourceBuilder[SubresourceAttributes]
+ err := servers.List(l.NovaV2, opts).EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) {
+ var instances []servers.Server
+ err := servers.ExtractServersInto(page, &instances)
+ if err != nil {
+ return false, err
+ }
+
+ for _, instance := range instances {
+ res, err := l.buildInstanceSubresource(ctx, instance)
+ if err != nil {
+ return false, err
+ }
+ result = append(result, res)
+ }
+ return true, nil
+ })
+ return result, err
+}
+
+type novaServerListOpts struct {
+ AllTenants bool `q:"all_tenants"`
+ TenantID string `q:"tenant_id"`
+}
+
+func (opts novaServerListOpts) ToServerListQuery() (string, error) {
+ q, err := gophercloud.BuildQueryString(opts)
+ return q.String(), err
+}
diff --git a/internal/plugins/nova/types.go b/internal/liquids/nova/types.go
similarity index 100%
rename from internal/plugins/nova/types.go
rename to internal/liquids/nova/types.go
diff --git a/internal/liquids/nova/usage.go b/internal/liquids/nova/usage.go
new file mode 100644
index 000000000..50a89bcea
--- /dev/null
+++ b/internal/liquids/nova/usage.go
@@ -0,0 +1,203 @@
+/*******************************************************************************
+*
+* Copyright 2024 SAP SE
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You should have received a copy of the License along with this
+* program. If not, you may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*
+*******************************************************************************/
+
+package nova
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/limits"
+ "github.com/sapcc/go-api-declarations/liquid"
+)
+
+// UsageInAZ is like `r.PerAZ[az]`, but inserts a new zero-valued AZResourceUsageReport on first access.
+// This is useful when calculating AZ-aware usage by iterating through a list of AZ-localized objects.
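+//
+// A minimal usage sketch (with a hypothetical slice of AZ-localized objects):
+//
+//	for _, obj := range objects {
+//		UsageInAZ(report, obj.AZ).Usage += 1
+//	}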
+func UsageInAZ(r *liquid.ResourceUsageReport, az liquid.AvailabilityZone) *liquid.AZResourceUsageReport {
+ if r.PerAZ == nil {
+ panic("ResourceData.GetOrCreateEntry cannot operate on a nil PerAZ")
+ }
+ entry := r.PerAZ[az]
+ if entry == nil {
+ entry = &liquid.AZResourceUsageReport{}
+ r.PerAZ[az] = entry
+ }
+ return entry
+}
+
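+// pooledResourceName returns the resource that usage of the given base
+// resource is counted towards. For example, assuming the default quota class
+// set contains "hw_version_v2_cores" (a hypothetical hw_version), then:
+//
+//	l.pooledResourceName("v2", "cores") == "hw_version_v2_cores"
+//	l.pooledResourceName("", "cores") == "cores"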
+func (l *Logic) pooledResourceName(hwVersion string, base liquid.ResourceName) liquid.ResourceName {
+ // `base` is one of "cores", "instances" or "ram"
+ if hwVersion == "" {
+ return base
+ }
+
+ // if we saw a "quota:hw_version" extra spec on the instance's flavor, use the appropriate resource if it exists
+ if l.hasPooledResource[hwVersion][base] {
+ return liquid.ResourceName(fmt.Sprintf("hw_version_%s_%s", hwVersion, base))
+ }
+ return base
+}
+
+func (l *Logic) ScanUsage(ctx context.Context, projectUUID string, req liquid.ServiceUsageRequest, serviceInfo liquid.ServiceInfo) (liquid.ServiceUsageReport, error) {
+ var limitsData struct {
+ Limits struct {
+ Absolute struct {
+ MaxTotalCores int64 `json:"maxTotalCores"`
+ MaxTotalInstances int64 `json:"maxTotalInstances"`
+ MaxTotalRAMSize int64 `json:"maxTotalRAMSize"`
+ MaxServerGroups int64 `json:"maxServerGroups"`
+ MaxServerGroupMembers int64 `json:"maxServerGroupMembers"`
+ TotalCoresUsed uint64 `json:"totalCoresUsed"`
+ TotalInstancesUsed uint64 `json:"totalInstancesUsed"`
+ TotalRAMUsed uint64 `json:"totalRAMUsed"`
+ TotalServerGroupsUsed uint64 `json:"totalServerGroupsUsed"`
+ } `json:"absolute"`
+ AbsolutePerFlavor map[string]struct {
+ MaxTotalInstances int64 `json:"maxTotalInstances"`
+ TotalInstancesUsed uint64 `json:"totalInstancesUsed"`
+ } `json:"absolutePerFlavor"`
+ AbsolutePerHWVersion map[string]struct {
+ MaxTotalCores int64 `json:"maxTotalCores"`
+ MaxTotalInstances int64 `json:"maxTotalInstances"`
+ MaxTotalRAMSize int64 `json:"maxTotalRAMSize"`
+ TotalCoresUsed uint64 `json:"totalCoresUsed"`
+ TotalInstancesUsed uint64 `json:"totalInstancesUsed"`
+ TotalRAMUsed uint64 `json:"totalRAMUsed"`
+ } `json:"absolutePerHwVersion"`
+ } `json:"limits"`
+ }
+ err := limits.Get(ctx, l.NovaV2, limits.GetOpts{TenantID: projectUUID}).ExtractInto(&limitsData)
+ if err != nil {
+ return liquid.ServiceUsageReport{}, err
+ }
+ absoluteLimits := limitsData.Limits.Absolute
+ var totalServerGroupMembersUsed uint64
+ if absoluteLimits.TotalServerGroupsUsed > 0 {
+ totalServerGroupMembersUsed, err = l.ServerGroupProber.GetMemberUsageForProject(ctx, projectUUID)
+ if err != nil {
+ return liquid.ServiceUsageReport{}, err
+ }
+ }
+
+ // initialize `Resources`
+ resources := map[liquid.ResourceName]*liquid.ResourceUsageReport{
+ "cores": {
+ Quota: &absoluteLimits.MaxTotalCores,
+ PerAZ: liquid.AZResourceUsageReport{Usage: absoluteLimits.TotalCoresUsed}.PrepareForBreakdownInto(req.AllAZs),
+ },
+ "instances": {
+ Quota: &absoluteLimits.MaxTotalInstances,
+ PerAZ: liquid.AZResourceUsageReport{Usage: absoluteLimits.TotalInstancesUsed}.PrepareForBreakdownInto(req.AllAZs),
+ },
+ "ram": {
+ Quota: &absoluteLimits.MaxTotalRAMSize,
+ PerAZ: liquid.AZResourceUsageReport{Usage: absoluteLimits.TotalRAMUsed}.PrepareForBreakdownInto(req.AllAZs),
+ },
+ "server_groups": {
+ Quota: &absoluteLimits.MaxServerGroups,
+ PerAZ: map[liquid.AvailabilityZone]*liquid.AZResourceUsageReport{liquid.AvailabilityZoneAny: {Usage: absoluteLimits.TotalServerGroupsUsed}},
+ },
+ "server_group_members": {
+ Quota: &absoluteLimits.MaxServerGroupMembers,
+ PerAZ: map[liquid.AvailabilityZone]*liquid.AZResourceUsageReport{liquid.AvailabilityZoneAny: {Usage: totalServerGroupMembersUsed}},
+ },
+ }
+ for flavorName, flavorLimits := range limitsData.Limits.AbsolutePerFlavor {
+ if l.IgnoreFlavor(flavorName) {
+ continue
+ }
+ resourceName := ResourceNameForFlavor(flavorName)
+ resources[resourceName] = &liquid.ResourceUsageReport{
+ Quota: &flavorLimits.MaxTotalInstances,
+ PerAZ: liquid.AZResourceUsageReport{Usage: flavorLimits.TotalInstancesUsed}.PrepareForBreakdownInto(req.AllAZs),
+ }
+ }
+ for hwVersion, hwLimits := range limitsData.Limits.AbsolutePerHWVersion {
+ if l.hasPooledResource[hwVersion]["cores"] {
+ resources[l.pooledResourceName(hwVersion, "cores")] = &liquid.ResourceUsageReport{
+ Quota: &hwLimits.MaxTotalCores,
+ PerAZ: liquid.AZResourceUsageReport{Usage: hwLimits.TotalCoresUsed}.PrepareForBreakdownInto(req.AllAZs),
+ }
+ }
+ if l.hasPooledResource[hwVersion]["instances"] {
+ resources[l.pooledResourceName(hwVersion, "instances")] = &liquid.ResourceUsageReport{
+ Quota: &hwLimits.MaxTotalInstances,
+ PerAZ: liquid.AZResourceUsageReport{Usage: hwLimits.TotalInstancesUsed}.PrepareForBreakdownInto(req.AllAZs),
+ }
+ }
+ if l.hasPooledResource[hwVersion]["ram"] {
+ resources[l.pooledResourceName(hwVersion, "ram")] = &liquid.ResourceUsageReport{
+ Quota: &hwLimits.MaxTotalRAMSize,
+ PerAZ: liquid.AZResourceUsageReport{Usage: hwLimits.TotalRAMUsed}.PrepareForBreakdownInto(req.AllAZs),
+ }
+ }
+ }
+
+ // Nova does not have a native API for AZ-aware usage reporting,
+ // so we will obtain AZ-aware usage stats by counting up all subresources,
+ // even if we don't end up showing them in the API
+ allSubresourceBuilders, err := l.buildInstanceSubresources(ctx, projectUUID)
+ if err != nil {
+ return liquid.ServiceUsageReport{}, fmt.Errorf("while collecting instance data: %w", err)
+ }
+
+ for _, subresBuilder := range allSubresourceBuilders {
+ attrs := subresBuilder.Attributes
+
+ az := attrs.AZ
+
+ if l.IgnoreFlavor(attrs.FlavorName) {
+ continue
+ }
+
+ // use separate instance resource if we have a matching "instances_$FLAVOR" resource
+ instanceResourceName := ResourceNameForFlavor(attrs.FlavorName)
+ isPooled := false
+ if _, exists := resources[instanceResourceName]; !exists {
+ // otherwise use the appropriate pooled instance resource
+ isPooled = true
+ instanceResourceName = l.pooledResourceName(attrs.HWVersion, "instances")
+ }
+
+ // count subresource towards "instances" (or separate instance resource)
+ resources[instanceResourceName].AddLocalizedUsage(az, 1)
+ if l.WithSubresources {
+ azData := UsageInAZ(resources[instanceResourceName], az)
+ subres, err := subresBuilder.Finalize()
+ if err != nil {
+ return liquid.ServiceUsageReport{}, fmt.Errorf("could not serialize attributes of subresource: %w", err)
+ }
+ azData.Subresources = append(azData.Subresources, subres)
+ }
+
+ // if counted towards separate instance resource, do not count towards "cores" and "ram"
+ if !isPooled {
+ continue
+ }
+
+ // count towards "cores" and "ram" under the appropriate pooled resource
+ resources[l.pooledResourceName(attrs.HWVersion, "cores")].AddLocalizedUsage(az, attrs.VCPUs)
+ resources[l.pooledResourceName(attrs.HWVersion, "ram")].AddLocalizedUsage(az, attrs.MemoryMiB)
+ }
+
+ return liquid.ServiceUsageReport{
+ InfoVersion: serviceInfo.Version,
+ Resources: resources,
+ }, nil
+}
diff --git a/internal/plugins/capacity_nova.go b/internal/plugins/capacity_nova.go
index 7c1c8794e..fbb9762ac 100644
--- a/internal/plugins/capacity_nova.go
+++ b/internal/plugins/capacity_nova.go
@@ -39,7 +39,7 @@ import (
"github.com/sapcc/limes/internal/core"
"github.com/sapcc/limes/internal/db"
"github.com/sapcc/limes/internal/liquids"
- "github.com/sapcc/limes/internal/plugins/nova"
+ "github.com/sapcc/limes/internal/liquids/nova"
)
type capacityNovaPlugin struct {
@@ -293,6 +293,7 @@ func (p *capacityNovaPlugin) Scrape(ctx context.Context, backchannel core.Capaci
logg.Debug("could not simulate placement of known instance %s on %s", instance.ID, hv.Match.Hypervisor.Description())
}
}
+ break
}
}
@@ -443,7 +444,7 @@ func (p *capacityNovaPlugin) Scrape(ctx context.Context, backchannel core.Capaci
for az, hypervisors := range hypervisorsByAZ {
var (
azCapacity nova.PartialCapacity
- builder nova.PooledSubcapacityBuilder
+ builder nova.DeprecatedPooledSubcapacityBuilder
)
for _, h := range hypervisors {
azCapacity.Add(h.Match.PartialCapacity())
@@ -458,9 +459,9 @@ func (p *capacityNovaPlugin) Scrape(ctx context.Context, backchannel core.Capaci
}
}
- capacities[p.PooledCoresResourceName][az] = pointerTo(azCapacity.IntoCapacityData("cores", float64(maxRootDiskSize), builder.CoresSubcapacities))
- capacities[p.PooledInstancesResourceName][az] = pointerTo(azCapacity.IntoCapacityData("instances", float64(maxRootDiskSize), builder.InstancesSubcapacities))
- capacities[p.PooledRAMResourceName][az] = pointerTo(azCapacity.IntoCapacityData("ram", float64(maxRootDiskSize), builder.RAMSubcapacities))
+ capacities[p.PooledCoresResourceName][az] = pointerTo(azCapacity.DeprecatedIntoCapacityData("cores", float64(maxRootDiskSize), builder.CoresSubcapacities))
+ capacities[p.PooledInstancesResourceName][az] = pointerTo(azCapacity.DeprecatedIntoCapacityData("instances", float64(maxRootDiskSize), builder.InstancesSubcapacities))
+ capacities[p.PooledRAMResourceName][az] = pointerTo(azCapacity.DeprecatedIntoCapacityData("ram", float64(maxRootDiskSize), builder.RAMSubcapacities))
for _, flavor := range splitFlavors {
count := hypervisors.PlacementCountForFlavor(flavor.Name)
capacities[p.PooledCoresResourceName][az].Capacity -= coresDemand.OvercommitFactor.ApplyInReverseTo(count * liquids.AtLeastZero(flavor.VCPUs))
@@ -481,7 +482,7 @@ func (p *capacityNovaPlugin) Scrape(ctx context.Context, backchannel core.Capaci
for az, hypervisors := range hypervisorsByAZ {
// if we could not report subcapacities on pooled resources, report them on
// the first flavor in alphabetic order (this is why we just sorted them)
- var builder nova.SplitFlavorSubcapacityBuilder
+ var builder nova.DeprecatedSplitFlavorSubcapacityBuilder
if p.WithSubcapacities && p.PooledCoresResourceName == "" && idx == 0 {
for _, h := range hypervisors {
builder.AddHypervisor(h.Match)
diff --git a/internal/plugins/nova.go b/internal/plugins/nova.go
index 81df22afb..af50a799f 100644
--- a/internal/plugins/nova.go
+++ b/internal/plugins/nova.go
@@ -39,7 +39,7 @@ import (
"github.com/sapcc/limes/internal/core"
"github.com/sapcc/limes/internal/db"
- "github.com/sapcc/limes/internal/plugins/nova"
+ "github.com/sapcc/limes/internal/liquids/nova"
)
type novaPlugin struct {
diff --git a/internal/plugins/nova/capacity.go b/internal/plugins/nova/capacity.go
deleted file mode 100644
index 89dc46980..000000000
--- a/internal/plugins/nova/capacity.go
+++ /dev/null
@@ -1,112 +0,0 @@
-/*******************************************************************************
-*
-* Copyright 2019-2024 SAP SE
-*
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You should have received a copy of the License along with this
-* program. If not, you may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*
-*******************************************************************************/
-
-package nova
-
-import (
- "fmt"
-
- "github.com/sapcc/limes/internal/core"
-)
-
-// PartialCapacity describes compute capacity at a level below the entire
-// cluster (e.g. for a single hypervisor, aggregate or AZ).
-type PartialCapacity struct {
- VCPUs PartialCapacityMetric
- MemoryMB PartialCapacityMetric
- LocalGB PartialCapacityMetric
- RunningVMs uint64
- MatchingAggregates map[string]bool
- Subcapacities []any // only filled on AZ level
-}
-
-func (c *PartialCapacity) Add(other PartialCapacity) {
- c.VCPUs.Capacity += other.VCPUs.Capacity
- c.VCPUs.Usage += other.VCPUs.Usage
- c.MemoryMB.Capacity += other.MemoryMB.Capacity
- c.MemoryMB.Usage += other.MemoryMB.Usage
- c.LocalGB.Capacity += other.LocalGB.Capacity
- c.LocalGB.Usage += other.LocalGB.Usage
- c.RunningVMs += other.RunningVMs
-
- if c.MatchingAggregates == nil {
- c.MatchingAggregates = make(map[string]bool)
- }
- for aggrName, matches := range other.MatchingAggregates {
- if matches {
- c.MatchingAggregates[aggrName] = true
- }
- }
-}
-
-func (c PartialCapacity) CappedToUsage() PartialCapacity {
- return PartialCapacity{
- VCPUs: c.VCPUs.CappedToUsage(),
- MemoryMB: c.MemoryMB.CappedToUsage(),
- LocalGB: c.LocalGB.CappedToUsage(),
- RunningVMs: c.RunningVMs,
- MatchingAggregates: c.MatchingAggregates,
- Subcapacities: c.Subcapacities,
- }
-}
-
-func (c PartialCapacity) IntoCapacityData(resourceName string, maxRootDiskSize float64, subcapacities []any) core.CapacityData {
- switch resourceName {
- case "cores":
- return core.CapacityData{
- Capacity: c.VCPUs.Capacity,
- Usage: &c.VCPUs.Usage,
- Subcapacities: subcapacities,
- }
- case "ram":
- return core.CapacityData{
- Capacity: c.MemoryMB.Capacity,
- Usage: &c.MemoryMB.Usage,
- Subcapacities: subcapacities,
- }
- case "instances":
- amount := 10000 * uint64(len(c.MatchingAggregates))
- if maxRootDiskSize != 0 {
- maxAmount := uint64(float64(c.LocalGB.Capacity) / maxRootDiskSize)
- if amount > maxAmount {
- amount = maxAmount
- }
- }
- return core.CapacityData{
- Capacity: amount,
- Usage: &c.RunningVMs,
- Subcapacities: subcapacities,
- }
- default:
- panic(fmt.Sprintf("called with unknown resourceName %q", resourceName))
- }
-}
-
-// PartialCapacityMetric appears in type PartialCapacity.
-type PartialCapacityMetric struct {
- Capacity uint64
- Usage uint64
-}
-
-func (m PartialCapacityMetric) CappedToUsage() PartialCapacityMetric {
- return PartialCapacityMetric{
- Capacity: min(m.Capacity, m.Usage),
- Usage: m.Usage,
- }
-}
diff --git a/internal/plugins/nova_subresources.go b/internal/plugins/nova_subresources.go
index 4863ab36b..59e3ea7a9 100644
--- a/internal/plugins/nova_subresources.go
+++ b/internal/plugins/nova_subresources.go
@@ -31,7 +31,7 @@ import (
"github.com/sapcc/go-api-declarations/limes"
"github.com/sapcc/limes/internal/core"
- "github.com/sapcc/limes/internal/plugins/nova"
+ "github.com/sapcc/limes/internal/liquids/nova"
)
// A compute instance as shown in our compute/instances subresources.
diff --git a/main.go b/main.go
index d9ecdf06b..9c12d303a 100644
--- a/main.go
+++ b/main.go
@@ -64,6 +64,7 @@ import (
"github.com/sapcc/limes/internal/liquids/ironic"
"github.com/sapcc/limes/internal/liquids/manila"
"github.com/sapcc/limes/internal/liquids/neutron"
+ "github.com/sapcc/limes/internal/liquids/nova"
"github.com/sapcc/limes/internal/liquids/octavia"
"github.com/sapcc/limes/internal/liquids/swift"
"github.com/sapcc/limes/internal/util"
@@ -120,6 +121,9 @@ func main() {
must.Succeed(liquidapi.Run(ctx, &manila.Logic{}, opts))
case "neutron":
must.Succeed(liquidapi.Run(ctx, &neutron.Logic{}, opts))
+ case "nova":
+ opts.TakesConfiguration = true
+ must.Succeed(liquidapi.Run(ctx, &nova.Logic{}, opts))
case "octavia":
must.Succeed(liquidapi.Run(ctx, &octavia.Logic{}, opts))
case "swift":
@@ -323,6 +327,10 @@ func taskTestGetQuota(ctx context.Context, cluster *core.Cluster, args []string)
result, serializedMetrics, err := cluster.QuotaPlugins[serviceType].Scrape(ctx, project, cluster.Config.AvailabilityZones)
must.Succeed(err)
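+ // print the scrape result before the validation loop below, so that it is
+ // still visible if one of the checks fails via logg.Fatal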
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", " ")
+ must.Succeed(enc.Encode(result))
+
for resourceName := range result {
if !cluster.HasResource(serviceType, resourceName) {
logg.Fatal("scrape returned data for unknown resource: %s/%s", serviceType, resourceName)
@@ -339,7 +347,7 @@ func taskTestGetQuota(ctx context.Context, cluster *core.Cluster, args []string)
})
dumpGeneratedPrometheusMetrics()
- enc := json.NewEncoder(os.Stdout)
+ enc = json.NewEncoder(os.Stdout)
enc.SetIndent("", " ")
must.Succeed(enc.Encode(result))
}