Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Syncing latest changes from upstream master for rook #678

Merged
merged 6 commits into from
Jul 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ kind: BucketClass
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
name: sample-bcc
driverName: ceph.objectstorage.k8s.io
driverName: rook-ceph.ceph.objectstorage.k8s.io
deletionPolicy: Delete
parameters:
objectStoreUserSecretName: rook-ceph-object-user-my-store-cosi
Expand All @@ -63,7 +63,7 @@ kind: BucketAccessClass
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
name: sample-bac
driverName: ceph.objectstorage.k8s.io
driverName: rook-ceph.ceph.objectstorage.k8s.io
authenticationType: KEY
parameters:
objectStoreUserSecretName: rook-ceph-object-user-my-store-cosi
Expand Down
2 changes: 2 additions & 0 deletions PendingReleaseNotes.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,7 @@

## Breaking Changes

- Updating the Ceph COSI driver images impacts existing COSI `Buckets` and `BucketAccesses`.
Please update the `BucketClass` and `BucketAccessClass` to resolve this; refer to the details [here](https://github.com/rook/rook/discussions/14297).

## Features
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ Please use the following to reach members of the community:

## Community Meeting

A regular community meeting takes place every other [Tuesday at 9:00 AM PT (Pacific Time)](https://zoom.us/j/98052644520?pwd=K0R4RUZCc3NhQisyMnA5VlV2MVBhQT09).
A regular community meeting takes place the [2nd Tuesday of every month at 9:00 AM PT (Pacific Time)](https://zoom.us/j/98052644520?pwd=K0R4RUZCc3NhQisyMnA5VlV2MVBhQT09).
Convert to your [local timezone](http://www.thetimezoneconverter.com/?t=9:00&tz=PT%20%28Pacific%20Time%29).

Any changes to the meeting schedule will be added to the [agenda doc](https://docs.google.com/document/d/1exd8_IG6DkdvyA0eiTtL2z5K2Ra-y68VByUUgwP7I9A/edit?usp=sharing) and posted to [Slack #announcements](https://rook-io.slack.com/messages/C76LLCEE7/).
Expand Down
2 changes: 1 addition & 1 deletion deploy/examples/cosi/bucketaccessclass.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ kind: BucketAccessClass
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
name: sample-bac
driverName: ceph.objectstorage.k8s.io
driverName: rook-ceph.ceph.objectstorage.k8s.io
authenticationType: KEY
parameters:
objectStoreUserSecretName: rook-ceph-object-user-my-store-cosi
Expand Down
2 changes: 1 addition & 1 deletion deploy/examples/cosi/bucketclass.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ kind: BucketClass
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
name: sample-bcc
driverName: ceph.objectstorage.k8s.io
driverName: rook-ceph.ceph.objectstorage.k8s.io
deletionPolicy: Delete
parameters:
objectStoreUserSecretName: rook-ceph-object-user-my-store-cosi
Expand Down
4 changes: 2 additions & 2 deletions deploy/examples/images.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
gcr.io/k8s-staging-sig-storage/objectstorage-sidecar/objectstorage-sidecar:v20230130-v0.1.0-24-gc0cf995
gcr.io/k8s-staging-sig-storage/objectstorage-sidecar:v20240513-v0.1.0-35-gefb3255
quay.io/ceph/ceph:v18.2.2
quay.io/ceph/cosi:v0.1.1
quay.io/ceph/cosi:v0.1.2
quay.io/cephcsi/cephcsi:v3.11.0
quay.io/csiaddons/k8s-sidecar:v0.8.0
registry.k8s.io/sig-storage/csi-attacher:v4.5.1
Expand Down
4 changes: 4 additions & 0 deletions pkg/daemon/ceph/client/pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -468,6 +468,10 @@ func updatePoolCrushRule(context *clusterd.Context, clusterInfo *ClusterInfo, cl
logger.Debugf("Skipping crush rule update for pool %q: EnableCrushUpdates is disabled", pool.Name)
return nil
}
if clusterSpec.IsStretchCluster() {
logger.Debugf("skipping crush rule update for pool %q in a stretch cluster", pool.Name)
return nil
}

if pool.FailureDomain == "" && pool.DeviceClass == "" {
logger.Debugf("skipping check for failure domain and deviceClass on pool %q as it is not specified", pool.Name)
Expand Down
23 changes: 23 additions & 0 deletions pkg/daemon/ceph/client/pool_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -251,10 +251,12 @@ func TestUpdateFailureDomain(t *testing.T) {
currentFailureDomain := "rack"
currentDeviceClass := "default"
testCrushRuleName := "test_rule"
cephCommandCalled := false
executor := &exectest.MockExecutor{}
context := &clusterd.Context{Executor: executor}
executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
logger.Infof("Command: %s %v", command, args)
cephCommandCalled = true
if args[1] == "pool" {
if args[2] == "get" {
assert.Equal(t, "mypool", args[3])
Expand Down Expand Up @@ -335,6 +337,27 @@ func TestUpdateFailureDomain(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, "mypool_zone", newCrushRule)
})

t.Run("stretch cluster skips crush rule update", func(t *testing.T) {
p := cephv1.NamedPoolSpec{
Name: "mypool",
PoolSpec: cephv1.PoolSpec{
FailureDomain: "zone",
Replicated: cephv1.ReplicatedSpec{Size: 3},
EnableCrushUpdates: true,
},
}
clusterSpec := &cephv1.ClusterSpec{
Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.MonZoneSpec{{Name: "zone1"}, {Name: "zone2"}, {Name: "zone3", Arbiter: true}}}},
Storage: cephv1.StorageScopeSpec{},
}
newCrushRule = ""
cephCommandCalled = false
err := updatePoolCrushRule(context, AdminTestClusterInfo("mycluster"), clusterSpec, p)
assert.NoError(t, err)
assert.Equal(t, "", newCrushRule)
assert.False(t, cephCommandCalled)
})
}

func TestExtractPoolDetails(t *testing.T) {
Expand Down
1 change: 1 addition & 0 deletions pkg/operator/ceph/object/cosi/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ const (
cosiSocketMountPath = "/var/lib/cosi"
DefaultServiceAccountName = "objectstorage-provisioner"
cosiSocketVolumeName = "socket"
CephCOSIDriverPrefix = "rook-ceph"
)

var (
Expand Down
7 changes: 5 additions & 2 deletions pkg/operator/ceph/object/cosi/spec.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ import (
)

const (
defaultCOSISideCarImage = "gcr.io/k8s-staging-sig-storage/objectstorage-sidecar/objectstorage-sidecar:v20230130-v0.1.0-24-gc0cf995"
defaultCephCOSIDriverImage = "quay.io/ceph/cosi:v0.1.1"
defaultCOSISideCarImage = "gcr.io/k8s-staging-sig-storage/objectstorage-sidecar:v20240513-v0.1.0-35-gefb3255"
defaultCephCOSIDriverImage = "quay.io/ceph/cosi:v0.1.2"
)

func createCephCOSIDriverDeployment(cephCOSIDriver *cephv1.CephCOSIDriver) (*appsv1.Deployment, error) {
Expand Down Expand Up @@ -114,6 +114,9 @@ func createCOSIDriverContainer(cephCOSIDriver *cephv1.CephCOSIDriver) corev1.Con
return corev1.Container{
Name: CephCOSIDriverName,
Image: cephCOSIDriveImage,
Args: []string{
"--driver-prefix=" + CephCOSIDriverPrefix,
},
Env: []corev1.EnvVar{
{Name: "POD_NAMESPACE", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}}}},
VolumeMounts: []corev1.VolumeMount{
Expand Down
2 changes: 1 addition & 1 deletion tests/framework/installer/ceph_manifests.go
Original file line number Diff line number Diff line change
Expand Up @@ -682,7 +682,7 @@ kind: BucketClass
metadata:
name: ` + name + `
namespace: ` + m.settings.OperatorNamespace + `
driverName: ceph.objectstorage.k8s.io
driverName: ` + cosi.CephCOSIDriverPrefix + `.ceph.objectstorage.k8s.io
deletionPolicy: ` + deletionPolicy + `
parameters:
objectStoreUserSecretName: ` + objectStoreUserSecretName + `
Expand Down
Loading