Skip to content

Commit

Permalink
Ensure created pods are bound to PVCs on schedulable nodes.
Browse files Browse the repository at this point in the history
  • Loading branch information
HackToHell committed Nov 1, 2024
1 parent 44e971e commit 97b5bfa
Show file tree
Hide file tree
Showing 4 changed files with 279 additions and 7 deletions.
17 changes: 10 additions & 7 deletions controllers/humiocluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -2092,10 +2092,14 @@ func (r *HumioClusterReconciler) ensurePersistentVolumeClaimsExist(ctx context.C
if err != nil {
return r.logErrorAndReturn(err, "failed to list pvcs")
}
r.Log.Info(fmt.Sprintf("found %d pvcs", len(foundPersistentVolumeClaims)))
filteredPersistentVolumeCalims, err := r.FilterSchedulablePVCs(ctx, foundPersistentVolumeClaims)
if err != nil {
return r.logErrorAndReturn(err, "failed to filter pvcs")
}
r.Log.Info(fmt.Sprintf("found %d pvcs", len(filteredPersistentVolumeCalims)))

if len(foundPersistentVolumeClaims) < hnp.GetNodeCount() {
r.Log.Info(fmt.Sprintf("pvc count of %d is less than %d. adding more", len(foundPersistentVolumeClaims), hnp.GetNodeCount()))
if len(filteredPersistentVolumeCalims) < hnp.GetNodeCount() {
r.Log.Info(fmt.Sprintf("pvc count of %d is less than %d. adding more", len(filteredPersistentVolumeCalims), hnp.GetNodeCount()))
pvc := constructPersistentVolumeClaim(hnp)
if err := controllerutil.SetControllerReference(hc, pvc, r.Scheme()); err != nil {
return r.logErrorAndReturn(err, "could not set controller reference")
Expand Down Expand Up @@ -2155,10 +2159,9 @@ func (r *HumioClusterReconciler) pvcList(ctx context.Context, hnp *HumioNodePool
if err != nil {
return pvcList, err
}
for _, pvc := range foundPvcList {
if pvc.DeletionTimestamp == nil {
pvcList = append(pvcList, pvc)
}
pvcList, err = r.FilterSchedulablePVCs(ctx, foundPvcList)
if err != nil {
return nil, err
}
}
return pvcList, nil
Expand Down
45 changes: 45 additions & 0 deletions controllers/humiocluster_persistent_volumes.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ package controllers
import (
"context"
"fmt"
"sort"
"time"

corev1 "k8s.io/api/core/v1"
Expand Down Expand Up @@ -75,6 +76,15 @@ func FindNextAvailablePvc(pvcList []corev1.PersistentVolumeClaim, podList []core
}
}
}
sort.Slice(pvcList, func(i, j int) bool {
if pvcList[i].Status.Phase == corev1.ClaimBound && pvcList[j].Status.Phase != corev1.ClaimBound {
return true
}
if pvcList[i].Status.Phase != corev1.ClaimBound && pvcList[j].Status.Phase == corev1.ClaimBound {
return false
}
return pvcList[i].Name < pvcList[j].Name
})

// return first PVC that is not used by any pods
for _, pvc := range pvcList {
Expand Down Expand Up @@ -102,3 +112,38 @@ func (r *HumioClusterReconciler) waitForNewPvc(ctx context.Context, hnp *HumioNo
}
return fmt.Errorf("timed out waiting to validate new pvc with name %s was created", expectedPvc.Name)
}

// FilterSchedulablePVCs returns the subset of the given PVCs that new pods can
// still be scheduled against. It drops PVCs that are being deleted and PVCs
// bound to local PersistentVolumes whose node is marked unschedulable (e.g.
// cordoned). Pending (unbound) PVCs and PVCs backed by non-local volumes are
// always kept. Returns an error if a referenced PV or node cannot be fetched,
// or if a local PV carries no usable node affinity.
func (r *HumioClusterReconciler) FilterSchedulablePVCs(ctx context.Context, persistentVolumeClaims []corev1.PersistentVolumeClaim) ([]corev1.PersistentVolumeClaim, error) {
	// Ensure the PVCs are bound to nodes that are actually schedulable in the case of local PVs
	schedulablePVCs := make([]corev1.PersistentVolumeClaim, 0, len(persistentVolumeClaims))
	for _, pvc := range persistentVolumeClaims {
		// PVCs that are being deleted must not be reused.
		if pvc.DeletionTimestamp != nil {
			continue
		}
		// Unbound PVCs are schedulable; they are not pinned to any node yet.
		if pvc.Status.Phase == corev1.ClaimPending {
			schedulablePVCs = append(schedulablePVCs, pvc)
			continue
		}
		pv, err := kubernetes.GetPersistentVolume(ctx, r, pvc.Spec.VolumeName)
		if err != nil {
			return nil, r.logErrorAndReturn(err, fmt.Sprintf("failed to get persistent volume %s", pvc.Spec.VolumeName))
		}
		// Only local PVs pin pods to a specific node; anything else is schedulable.
		if pv.Spec.Local == nil {
			schedulablePVCs = append(schedulablePVCs, pvc)
			continue
		}
		// Local PVs are required by the Kubernetes API to carry node affinity,
		// but guard against a malformed object instead of panicking on a nil
		// dereference or out-of-range index.
		nodeName, ok := localPVNodeName(pv)
		if !ok {
			return nil, r.logErrorAndReturn(fmt.Errorf("local persistent volume %s has no node affinity values", pv.Name), "failed to determine node for local persistent volume")
		}
		node, err := kubernetes.GetNode(ctx, r, nodeName)
		if err != nil {
			return nil, r.logErrorAndReturn(err, fmt.Sprintf("failed to get node %s", nodeName))
		}
		if node.Spec.Unschedulable {
			r.Log.Info("PVC bound to unschedulable node skipping",
				"pvc", pvc.Name,
				"node", node.Name)
			continue
		}
		schedulablePVCs = append(schedulablePVCs, pvc)
	}
	return schedulablePVCs, nil
}

// localPVNodeName extracts the node name a local PV is pinned to from its
// required node affinity, returning false when the affinity structure is
// missing or contains no values. For well-formed PVs this yields the same
// value as indexing the first term/expression/value directly.
func localPVNodeName(pv *corev1.PersistentVolume) (string, bool) {
	if pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil {
		return "", false
	}
	for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
		for _, expr := range term.MatchExpressions {
			if len(expr.Values) > 0 {
				return expr.Values[0], true
			}
		}
	}
	return "", false
}
208 changes: 208 additions & 0 deletions controllers/humiocluster_persistent_volumes_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,208 @@
package controllers

import (
"context"
"github.com/go-logr/logr"
"reflect"
"testing"
"time"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// TestFilterSchedulablePVCs is a table-driven test covering each filtering
// branch of FilterSchedulablePVCs: empty input, PVCs being deleted, pending
// (unbound) PVCs, non-local PVs, and local PVs bound to schedulable vs.
// unschedulable nodes.
func TestFilterSchedulablePVCs(t *testing.T) {
	tests := []struct {
		name          string
		inputPVCs     []corev1.PersistentVolumeClaim
		expectedPVCs  []corev1.PersistentVolumeClaim
		mockPV        *corev1.PersistentVolume
		mockNode      *corev1.Node
		expectedError bool
	}{
		{
			name:          "Empty PVC list",
			inputPVCs:     []corev1.PersistentVolumeClaim{},
			expectedPVCs:  []corev1.PersistentVolumeClaim{},
			expectedError: false,
		},
		{
			name: "PVC with deletion timestamp",
			inputPVCs: []corev1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "pvc-1",
						DeletionTimestamp: &metav1.Time{Time: time.Now()},
					},
				},
			},
			expectedPVCs:  []corev1.PersistentVolumeClaim{},
			expectedError: false,
		},
		{
			name: "Pending PVC",
			inputPVCs: []corev1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "pvc-2"},
					Status: corev1.PersistentVolumeClaimStatus{
						Phase: corev1.ClaimPending,
					},
				},
			},
			expectedPVCs: []corev1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "pvc-2"},
					Status: corev1.PersistentVolumeClaimStatus{
						Phase: corev1.ClaimPending,
					},
				},
			},
			expectedError: false,
		},
		{
			name: "Non-local PV",
			inputPVCs: []corev1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "pvc-3"},
					Spec: corev1.PersistentVolumeClaimSpec{
						VolumeName: "pv-3",
					},
				},
			},
			mockPV: &corev1.PersistentVolume{
				ObjectMeta: metav1.ObjectMeta{Name: "pv-3"},
				Spec: corev1.PersistentVolumeSpec{
					PersistentVolumeSource: corev1.PersistentVolumeSource{},
				},
			},
			expectedPVCs: []corev1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "pvc-3"},
					Spec: corev1.PersistentVolumeClaimSpec{
						VolumeName: "pv-3",
					},
				},
			},
			expectedError: false,
		},
		{
			name: "Local PV with schedulable node",
			inputPVCs: []corev1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "pvc-4"},
					Spec: corev1.PersistentVolumeClaimSpec{
						VolumeName: "pv-4",
					},
				},
			},
			mockPV: &corev1.PersistentVolume{
				ObjectMeta: metav1.ObjectMeta{Name: "pv-4"},
				Spec: corev1.PersistentVolumeSpec{
					PersistentVolumeSource: corev1.PersistentVolumeSource{Local: &corev1.LocalVolumeSource{}},
					NodeAffinity: &corev1.VolumeNodeAffinity{
						Required: &corev1.NodeSelector{
							NodeSelectorTerms: []corev1.NodeSelectorTerm{
								{
									MatchExpressions: []corev1.NodeSelectorRequirement{
										{
											Values: []string{"node-1"},
										},
									},
								},
							},
						},
					},
				},
			},
			mockNode: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
				Spec: corev1.NodeSpec{
					Unschedulable: false,
				},
			},
			expectedPVCs: []corev1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "pvc-4"},
					Spec: corev1.PersistentVolumeClaimSpec{
						VolumeName: "pv-4",
					},
				},
			},
			expectedError: false,
		},
		{
			name: "Local PV with unschedulable node",
			inputPVCs: []corev1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "pvc-5"},
					Spec: corev1.PersistentVolumeClaimSpec{
						VolumeName: "pv-5",
					},
				},
			},
			mockPV: &corev1.PersistentVolume{
				ObjectMeta: metav1.ObjectMeta{Name: "pv-5"},
				Spec: corev1.PersistentVolumeSpec{
					PersistentVolumeSource: corev1.PersistentVolumeSource{Local: &corev1.LocalVolumeSource{}},
					NodeAffinity: &corev1.VolumeNodeAffinity{
						Required: &corev1.NodeSelector{
							NodeSelectorTerms: []corev1.NodeSelectorTerm{
								{
									MatchExpressions: []corev1.NodeSelectorRequirement{
										{
											Values: []string{"node-2"},
										},
									},
								},
							},
						},
					},
				},
			},
			mockNode: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "node-2"},
				Spec: corev1.NodeSpec{
					Unschedulable: true,
				},
			},
			expectedPVCs:  []corev1.PersistentVolumeClaim{},
			expectedError: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Seed a fake client with the PV/Node fixtures for this case.
			// NewClientBuilder replaces the deprecated fake.NewFakeClient.
			client := fake.NewClientBuilder().Build()
			if tt.mockPV != nil {
				// Fail fast if the fixture cannot be created; otherwise the
				// assertion below would report a misleading mismatch.
				if err := client.Create(context.TODO(), tt.mockPV); err != nil {
					t.Fatalf("failed to create mock PV: %v", err)
				}
			}
			if tt.mockNode != nil {
				if err := client.Create(context.TODO(), tt.mockNode); err != nil {
					t.Fatalf("failed to create mock node: %v", err)
				}
			}

			// Create reconciler with the fake client
			r := &HumioClusterReconciler{
				Client: client,
				Log:    logr.Discard(),
			}

			// Call the function
			result, err := r.FilterSchedulablePVCs(context.TODO(), tt.inputPVCs)

			// Check error
			if tt.expectedError && err == nil {
				t.Error("expected error but got none")
			}
			if !tt.expectedError && err != nil {
				t.Errorf("unexpected error: %v", err)
			}

			// Check result
			if !reflect.DeepEqual(result, tt.expectedPVCs) {
				t.Errorf("expected %v but got %v", tt.expectedPVCs, result)
			}
		})
	}
}
16 changes: 16 additions & 0 deletions pkg/kubernetes/persistent_volume.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
package kubernetes

import (
"context"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// GetPersistentVolume fetches the cluster-scoped PersistentVolume with the
// given name via the provided client, returning any lookup error unchanged.
func GetPersistentVolume(ctx context.Context, c client.Client, name string) (*corev1.PersistentVolume, error) {
	pv := &corev1.PersistentVolume{}
	if err := c.Get(ctx, client.ObjectKey{Name: name}, pv); err != nil {
		return nil, err
	}
	return pv, nil
}

0 comments on commit 97b5bfa

Please sign in to comment.