Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

generalize DistributeDemandFairly() #606

Merged
merged 1 commit into from
Nov 21, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
90 changes: 9 additions & 81 deletions internal/liquids/manila/capacity.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ package manila

import (
"context"
"encoding/json"
"slices"
"strings"

Expand Down Expand Up @@ -185,18 +184,23 @@ func (l *Logic) scanCapacityForShareTypeAndAZ(vst VirtualShareType, azCount uint
}

// distribute capacity and usage between the various resource types
balance := map[string]float64{
"shares": 1,
"snapshots": l.CapacityCalculation.CapacityBalance,
"snapmirrors": 0,
}
logg.Debug("distributing capacity for share_type %q, AZ %q", vst.Name, az)
distributedCapacityGiB := l.distributeByDemand(uint64(totalCapacityGB), map[string]liquid.ResourceDemandInAZ{
distributedCapacityGiB := util.DistributeDemandFairly(uint64(totalCapacityGB), map[string]liquid.ResourceDemandInAZ{
"shares": shareCapacityDemand,
"snapshots": snapshotCapacityDemand,
"snapmirrors": snapmirrorCapacityDemand,
})
}, balance)
logg.Debug("distributing usage for share_type %q, AZ %q", vst.Name, az)
distributedUsageGiB := l.distributeByDemand(uint64(allocatedCapacityGB), map[string]liquid.ResourceDemandInAZ{
distributedUsageGiB := util.DistributeDemandFairly(uint64(allocatedCapacityGB), map[string]liquid.ResourceDemandInAZ{
"shares": {Usage: shareCapacityDemand.Usage},
"snapshots": {Usage: snapshotCapacityDemand.Usage},
"snapmirrors": {Usage: snapmirrorCapacityDemand.Usage},
})
}, balance)

// build overall result
params := l.CapacityCalculation
Expand Down Expand Up @@ -253,82 +257,6 @@ func (l *Logic) scanCapacityForShareTypeAndAZ(vst VirtualShareType, azCount uint
return result, nil
}

// This implements the method we use to distribute capacity and usage between shares and snapshots:
// Each tier of demand is distributed fairly (while supplies last).
// Then anything that is not covered by demand is distributed according to the configured CapacityBalance.
//
// For capacity, each tier of demand is considered.
// For usage, the caller will set all demand fields except for Usage to 0.
func (l *Logic) distributeByDemand(totalAmount uint64, demands map[string]liquid.ResourceDemandInAZ) map[string]uint64 {
	// setup phase to make each of the paragraphs below as identical as possible (for clarity)
	requests := make(map[string]uint64)
	result := make(map[string]uint64)
	remaining := totalAmount

	// tier 1: usage
	for k, demand := range demands {
		requests[k] = demand.Usage
	}
	grantedAmount := util.DistributeFairly(remaining, requests)
	for k := range demands {
		remaining -= grantedAmount[k]
		result[k] += grantedAmount[k]
	}
	if logg.ShowDebug {
		resultJSON, _ := json.Marshal(result) //nolint:errcheck // no reasonable way for this to fail, also only debug log
		logg.Debug("distributeByDemand after phase 1: " + string(resultJSON))
	}

	// tier 2: unused commitments
	for k, demand := range demands {
		requests[k] = demand.UnusedCommitments
	}
	grantedAmount = util.DistributeFairly(remaining, requests)
	for k := range demands {
		remaining -= grantedAmount[k]
		result[k] += grantedAmount[k]
	}
	if logg.ShowDebug {
		resultJSON, _ := json.Marshal(result) //nolint:errcheck // no reasonable way for this to fail, also only debug log
		logg.Debug("distributeByDemand after phase 2: " + string(resultJSON))
	}

	// tier 3: pending commitments
	for k, demand := range demands {
		requests[k] = demand.PendingCommitments
	}
	grantedAmount = util.DistributeFairly(remaining, requests)
	for k := range demands {
		remaining -= grantedAmount[k]
		result[k] += grantedAmount[k]
	}
	if logg.ShowDebug {
		resultJSON, _ := json.Marshal(result) //nolint:errcheck // no reasonable way for this to fail, also only debug log
		// BUGFIX: this log line previously said "phase 2" (copy-paste error); it reports phase 3.
		logg.Debug("distributeByDemand after phase 3: " + string(resultJSON))
	}

	// final phase: distribute all remaining capacity according to the configured CapacityBalance
	//
	// NOTE: The CapacityBalance value says how much capacity we give out
	// to snapshots as a fraction of the capacity given out to shares. For
	// example, with CapacityBalance = 2, we allocate 2/3 of the total capacity to
	// snapshots, and 1/3 to shares.
	if remaining > 0 {
		cb := l.CapacityCalculation.CapacityBalance
		portionForSnapshots := uint64(cb / (cb + 1) * float64(remaining))
		portionForShares := remaining - portionForSnapshots

		result["snapshots"] += portionForSnapshots
		result["shares"] += portionForShares
	}
	if logg.ShowDebug {
		resultJSON, _ := json.Marshal(result) //nolint:errcheck // no reasonable way for this to fail, also only debug log
		logg.Debug("distributeByDemand after CapacityBalance: " + string(resultJSON))
	}

	return result
}

////////////////////////////////////////////////////////////////////////////////
// internal types for capacity reporting

Expand Down
96 changes: 96 additions & 0 deletions internal/util/algorithms.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,12 @@
package util

import (
"encoding/json"
"math"
"slices"

"github.com/sapcc/go-api-declarations/liquid"
"github.com/sapcc/go-bits/logg"
)

// DistributeFairly takes a number of resource requests, as well as a total
Expand Down Expand Up @@ -84,3 +88,95 @@ func DistributeFairly[K comparable](total uint64, requested map[K]uint64) map[K]
}
return fair
}

// DistributeDemandFairly is used to distribute cluster capacity or cluster-wide usage between different resources.
// Each tier of demand (usage, then unused commitments, then pending commitments) is distributed fairly (while supplies last).
//
// Then anything not yet distributed is split according to the given balance numbers.
// For example, if balance = { "foo": 3, "bar": 1 }, then "foo" gets 3/4 of the remaining capacity, "bar" gets 1/4, and all other resources do not get anything extra.
func DistributeDemandFairly[K comparable](total uint64, demands map[K]liquid.ResourceDemandInAZ, balance map[K]float64) map[K]uint64 {
	requests := make(map[K]uint64, len(demands))
	result := make(map[K]uint64, len(demands))
	remaining := total

	// tier 1: usage
	for k, demand := range demands {
		requests[k] = demand.Usage
	}
	remaining = grantFromRequests(remaining, requests, result, "phase 1")

	// tier 2: unused commitments
	for k, demand := range demands {
		requests[k] = demand.UnusedCommitments
	}
	remaining = grantFromRequests(remaining, requests, result, "phase 2")

	// tier 3: pending commitments
	for k, demand := range demands {
		requests[k] = demand.PendingCommitments
	}
	remaining = grantFromRequests(remaining, requests, result, "phase 3")

	// final phase: distribute remainder according to the given balance
	if remaining == 0 {
		return result
	}
	for k := range demands {
		// This requests incorrect ratios if `remaining` and `balance[k]` are so
		// large that `balance[k] * remaining` falls outside the range of uint64.
		//
		// I'm accepting this since this scenario is very unlikely, and only made
		// sure that there are no weird overflows, truncations and such.
		requests[k] = clampFloatToUint64(balance[k] * float64(remaining))
	}
	_ = grantFromRequests(remaining, requests, result, "balance")
	return result
}

// grantFromRequests executes one tier of DistributeDemandFairly:
// it hands out at most `remaining` units across `requests` via DistributeFairly,
// accumulates the granted amounts into `result`, emits a debug log of the
// intermediate state, and returns how much is left to distribute.
func grantFromRequests[K comparable](remaining uint64, requests, result map[K]uint64, phase string) uint64 {
	grantedAmount := DistributeFairly(remaining, requests)
	for k, granted := range grantedAmount {
		remaining -= granted
		result[k] += granted
	}
	if logg.ShowDebug {
		resultJSON, err := json.Marshal(result)
		if err == nil {
			logg.Debug("DistributeDemandFairly after " + phase + ": " + string(resultJSON))
		}
	}
	return remaining
}

// clampFloatToUint64 converts x to uint64, saturating at the bounds of the
// uint64 range. NaN maps to 0.
//
// NOTE: The previous implementation used min(x, math.MaxUint64), but
// float64(math.MaxUint64) rounds up to exactly 2^64, which is itself out of
// range for uint64 — per the Go spec, converting such a float to uint64 is
// implementation-specific. The explicit comparisons below avoid that, and
// also handle NaN (which propagates through min/max and would hit the same
// undefined conversion).
func clampFloatToUint64(x float64) uint64 {
	if math.IsNaN(x) || x <= 0 {
		return 0
	}
	// `x >= math.MaxUint64` compares against float64(MaxUint64) == 2^64,
	// so any x below this threshold converts safely.
	if x >= math.MaxUint64 {
		return math.MaxUint64
	}
	return uint64(x)
}
103 changes: 103 additions & 0 deletions internal/util/algorithms_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ package util
import (
"testing"

"github.com/sapcc/go-api-declarations/liquid"
"github.com/sapcc/go-bits/assert"

"github.com/sapcc/limes/internal/db"
Expand All @@ -48,3 +49,105 @@ func TestDistributeFairlyWithLargeNumbers(t *testing.T) {
404: total / 4,
})
}

func TestDistributeDemandFairlyWithJustBalance(t *testing.T) {
	// With zero demand in every tier, the whole capacity falls through to the
	// final phase and is split purely by the balance ratio (2:1 for "foo").
	demands := map[string]liquid.ResourceDemandInAZ{
		"foo": {},
		"bar": {},
	}
	balance := map[string]float64{
		"foo": 2,
		"bar": 1,
	}

	result := DistributeDemandFairly(uint64(400), demands, balance)
	expected := map[string]uint64{
		"foo": 267,
		"bar": 133,
	}
	assert.DeepEqual(t, "output of DistributeDemandFairly", result, expected)
}

func TestDistributeDemandFairlyWithIncreasingCapacity(t *testing.T) {
	// This test uses the same demands and balance throughout, but capacity
	// increases over time to test how different types of demand are considered
	// in order.
	demands := map[string]liquid.ResourceDemandInAZ{
		"first": {
			Usage:              500,
			UnusedCommitments:  50,
			PendingCommitments: 10,
		},
		"second": {
			Usage:              300,
			UnusedCommitments:  200,
			PendingCommitments: 20,
		},
		"third": {
			Usage:              0,
			UnusedCommitments:  100,
			PendingCommitments: 70,
		},
	}
	balance := map[string]float64{
		"first":  0,
		"second": 1,
		"third":  1,
	}

	// usage cannot be covered (total usage is 800 > 200)
	result := DistributeDemandFairly(200, demands, balance)
	assert.DeepEqual(t, "output of DistributeDemandFairly", result, map[string]uint64{
		"first":  125,
		"second": 75,
		"third":  0,
	})

	// usage is exactly covered (500 + 300 + 0 = 800)
	result = DistributeDemandFairly(800, demands, balance)
	assert.DeepEqual(t, "output of DistributeDemandFairly", result, map[string]uint64{
		"first":  500,
		"second": 300,
		"third":  0,
	})

	// unused commitments cannot be covered (800 usage + 350 unused > 900)
	result = DistributeDemandFairly(900, demands, balance)
	assert.DeepEqual(t, "output of DistributeDemandFairly", result, map[string]uint64{
		"first":  514,
		"second": 357,
		"third":  29,
	})

	// unused commitments are exactly covered (800 + 50 + 200 + 100 = 1150)
	result = DistributeDemandFairly(1150, demands, balance)
	assert.DeepEqual(t, "output of DistributeDemandFairly", result, map[string]uint64{
		"first":  550,
		"second": 500,
		"third":  100,
	})

	// pending commitments cannot be covered (1150 + 100 pending > 1160)
	result = DistributeDemandFairly(1160, demands, balance)
	assert.DeepEqual(t, "output of DistributeDemandFairly", result, map[string]uint64{
		"first":  551,
		"second": 502,
		"third":  107,
	})

	// pending commitments are exactly covered (1150 + 10 + 20 + 70 = 1250)
	// NOTE(review): this comment previously said "unused commitments" — a
	// copy-paste of the 1150 case above; at 1250 it is the pending tier that
	// is exactly satisfied.
	result = DistributeDemandFairly(1250, demands, balance)
	assert.DeepEqual(t, "output of DistributeDemandFairly", result, map[string]uint64{
		"first":  560,
		"second": 520,
		"third":  170,
	})

	// extra capacity is distributed according to balance
	// (1000 extra split 0:1:1 between first, second, third)
	result = DistributeDemandFairly(2250, demands, balance)
	assert.DeepEqual(t, "output of DistributeDemandFairly", result, map[string]uint64{
		"first":  560,
		"second": 1020,
		"third":  670,
	})
}