SP (StorageProfile) unit tests
- CephFS tests added for provider and non-provider modes
- StorageClassRequest tests added

Signed-off-by: raaizik <[email protected]>
Co-authored-by: Jose A. Rivera <[email protected]>
raaizik and jarrpa committed Jan 29, 2024
1 parent 6e7dad4 commit 6b1a306
Showing 4 changed files with 551 additions and 8 deletions.
238 changes: 238 additions & 0 deletions controllers/storageclassrequest/storageclassrequest_controller_test.go
@@ -16,6 +16,7 @@ package storageclassrequest
import (
"context"
"fmt"
"strings"
"testing"

v1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
@@ -33,6 +34,9 @@
)

const (
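// Ceph pool tuning parameter keys used by the StorageProfile tests below.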
pgAutoscaleMode = "pg_autoscale_mode"
pgNum = "pg_num"
pgpNum = "pgp_num"
namespaceName = "test-ns"
deviceClass = "ssd"
storageProfileKind = "StorageProfile"
@@ -49,6 +53,46 @@ var fakeStorageProfile = &v1.StorageProfile{
},
}

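// A valid StorageProfile carries a non-blank device class plus pool parameters
// that are expected to propagate to the generated CephBlockPool.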
var validStorageProfile = &v1.StorageProfile{
TypeMeta: metav1.TypeMeta{Kind: storageProfileKind},
ObjectMeta: metav1.ObjectMeta{
Name: "valid",
Namespace: namespaceName,
},
Spec: v1.StorageProfileSpec{
DeviceClass: deviceClass,
BlockPoolConfiguration: v1.BlockPoolConfigurationSpec{
Parameters: map[string]string{
pgAutoscaleMode: "on",
pgNum: "128",
pgpNum: "128",
},
},
},
Status: v1.StorageProfileStatus{Phase: ""},
}

// A rejected StorageProfile is invalid because its device class field is blank;
// the reconciler sets its phase to Rejected.
var rejectedStorageProfile = &v1.StorageProfile{
TypeMeta: metav1.TypeMeta{Kind: storageProfileKind},
ObjectMeta: metav1.ObjectMeta{
Name: "rejected",
Namespace: namespaceName,
},
Spec: v1.StorageProfileSpec{
DeviceClass: "",
BlockPoolConfiguration: v1.BlockPoolConfigurationSpec{
Parameters: map[string]string{
pgAutoscaleMode: "on",
pgNum: "128",
pgpNum: "128",
},
},
},
Status: v1.StorageProfileStatus{Phase: ""},
}

var fakeStorageCluster = &v1.StorageCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-storagecluster",
@@ -225,6 +269,200 @@ func TestProfileReconcile(t *testing.T) {
assert.NoError(t, err, caseLabel)
}

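// TestStorageProfileCephBlockPool drives reconcilePhases() for blockpool-type
// StorageClassRequests: a valid profile should succeed and copy its pool
// parameters onto the CephBlockPool, while a rejected profile should fail.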
func TestStorageProfileCephBlockPool(t *testing.T) {
var err error
var caseCounter int

var primaryTestCases = []struct {
label string
expectedPoolName string
failureExpected bool
createObjects []runtime.Object
storageProfile *v1.StorageProfile
}{
{
label: "valid profile",
expectedPoolName: "test-valid-blockpool",
failureExpected: false,
storageProfile: validStorageProfile,
createObjects: []runtime.Object{
&rookCephv1.CephBlockPool{
ObjectMeta: metav1.ObjectMeta{
Name: "test-valid-blockpool",
Namespace: namespaceName,
Labels: map[string]string{
controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
controllers.StorageProfileSpecLabel: validStorageProfile.GetSpecHash(),
},
}, Spec: rookCephv1.NamedBlockPoolSpec{
Name: "spec",
PoolSpec: rookCephv1.PoolSpec{
FailureDomain: "zone",
DeviceClass: deviceClass,
Parameters: map[string]string{},
},
},
},
},
},
{
label: "rejected profile",
expectedPoolName: "test-rejected-blockpool",
failureExpected: true,
storageProfile: rejectedStorageProfile,
createObjects: []runtime.Object{
&rookCephv1.CephBlockPool{
ObjectMeta: metav1.ObjectMeta{
Name: "test-rejected-blockpool",
Namespace: namespaceName,
Labels: map[string]string{
controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
controllers.StorageProfileSpecLabel: rejectedStorageProfile.GetSpecHash(),
},
}, Spec: rookCephv1.NamedBlockPoolSpec{
Name: "spec",
PoolSpec: rookCephv1.PoolSpec{
FailureDomain: "zone",
DeviceClass: deviceClass,
Parameters: map[string]string{},
},
},
},
},
},
}

for _, c := range primaryTestCases {
caseCounter++
caseLabel := fmt.Sprintf("Case %d: %s", caseCounter, c.label)
fmt.Println(caseLabel)

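// Point the fake reconciler at the profile under test.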
r := createFakeReconciler(t)
r.storageCluster.Spec.DefaultStorageProfile = c.storageProfile.Name
r.StorageClassRequest.Spec.Type = "blockpool"

r.StorageClassRequest.Spec.StorageProfile = c.storageProfile.Name

c.createObjects = append(c.createObjects, c.storageProfile)
c.createObjects = append(c.createObjects, fakeStorageConsumer)

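// Seed the fake client with the block pool, profile, and consumer objects.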
fakeClient := fake.NewClientBuilder().WithScheme(r.Scheme).WithRuntimeObjects(c.createObjects...)
r.Client = fakeClient.Build()

_, err = r.reconcilePhases()
if c.failureExpected {
assert.Error(t, err, caseLabel)
continue
}
assert.NoError(t, err, caseLabel)

assert.Equal(t, c.expectedPoolName, r.cephBlockPool.Name, caseLabel)

if strings.Contains(c.expectedPoolName, "valid") {
expectedStorageProfileParameters := validStorageProfile.Spec.BlockPoolConfiguration.Parameters
actualBlockPoolParameters := r.cephBlockPool.Spec.Parameters
assert.Equal(t, expectedStorageProfileParameters, actualBlockPoolParameters, caseLabel)
assert.NotEqual(t, v1.StorageProfilePhaseRejected, c.storageProfile.Status.Phase, caseLabel)
} else {
actualBlockPoolParameters := r.cephBlockPool.Spec.Parameters
assert.Equal(t, v1.StorageProfilePhaseRejected, c.storageProfile.Status.Phase, caseLabel)
assert.Nil(t, actualBlockPoolParameters, caseLabel)
}
}
}

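// TestStorageProfileCephFsSubVolGroup drives reconcilePhases() for
// sharedfilesystem-type StorageClassRequests: a valid profile should produce
// the expected CephFilesystemSubVolumeGroup, while a rejected profile should fail.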
func TestStorageProfileCephFsSubVolGroup(t *testing.T) {
var err error
var caseCounter int

var primaryTestCases = []struct {
label string
expectedGroupName string
failureExpected bool
createObjects []runtime.Object
cephResources []*v1alpha1.CephResourcesSpec
storageProfile *v1.StorageProfile
cephFs *rookCephv1.CephFilesystem
}{
{
label: "valid profile",
expectedGroupName: "test-subvolgroup",
storageProfile: fakeStorageProfile,
cephFs: fakeCephFs,
failureExpected: false,
cephResources: []*v1alpha1.CephResourcesSpec{
{
Name: "test-subvolgroup",
Kind: "CephFilesystemSubVolumeGroup",
},
},
createObjects: []runtime.Object{
&rookCephv1.CephFilesystemSubVolumeGroup{
ObjectMeta: metav1.ObjectMeta{
Name: "test-subvolgroup",
Namespace: namespaceName,
},
Status: &rookCephv1.CephFilesystemSubVolumeGroupStatus{},
},
},
},
{
label: "rejected profile",
expectedGroupName: "test-subvolgroup",
storageProfile: rejectedStorageProfile,
cephFs: fakeCephFs,
failureExpected: true,
cephResources: []*v1alpha1.CephResourcesSpec{
{
Name: "test-subvolgroup",
Kind: "CephFilesystemSubVolumeGroup",
},
},
createObjects: []runtime.Object{
&rookCephv1.CephFilesystemSubVolumeGroup{
ObjectMeta: metav1.ObjectMeta{
Name: "test-subvolgroup",
Namespace: namespaceName,
},
Status: &rookCephv1.CephFilesystemSubVolumeGroupStatus{},
},
},
},
}

for _, c := range primaryTestCases {
caseCounter++
caseLabel := fmt.Sprintf("Case %d: %s", caseCounter, c.label)
fmt.Println(caseLabel)

r := createFakeReconciler(t)
if strings.Contains(c.label, "rejected") {
r.storageCluster.Spec.DefaultStorageProfile = rejectedStorageProfile.Name
}

r.StorageClassRequest.Status.CephResources = c.cephResources
r.StorageClassRequest.Spec.Type = "sharedfilesystem"
r.StorageClassRequest.Spec.StorageProfile = c.storageProfile.Name

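// Seed the fake client with the filesystem, profile, and consumer objects.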
c.createObjects = append(c.createObjects, c.cephFs)
c.createObjects = append(c.createObjects, c.storageProfile)
c.createObjects = append(c.createObjects, fakeStorageConsumer)
fakeClient := fake.NewClientBuilder().WithScheme(r.Scheme).WithRuntimeObjects(c.createObjects...)

r.Client = fakeClient.Build()

_, err = r.reconcilePhases()
if c.failureExpected {
assert.Error(t, err, caseLabel)
continue
}
assert.NoError(t, err, caseLabel)
assert.Equal(t, c.expectedGroupName, r.cephFilesystemSubVolumeGroup.Name, caseLabel)
}
}

func TestCephBlockPool(t *testing.T) {
var err error
var caseCounter int
Expand Down
85 changes: 79 additions & 6 deletions controllers/storagecluster/cephfilesystem_test.go
@@ -2,6 +2,7 @@ package storagecluster

import (
"context"
"strings"
"testing"

cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
@@ -17,23 +18,95 @@

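// TestCephFileSystem verifies CephFilesystem reconciliation both with and
// without remote storage consumers (provider mode).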
func TestCephFileSystem(t *testing.T) {
var cases = []struct {
label string
createRuntimeObjects bool
remoteStorageConsumers bool
}{
{
label: "Not in provider mode",
createRuntimeObjects: false,
remoteStorageConsumers: false,
},
{
label: "In provider mode",
createRuntimeObjects: false,
remoteStorageConsumers: true,
},
}
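// getMockStorageProfiles is assumed to return a mix of usable profiles and
// one blank-device-class profile that should end up Rejected.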
spList := getMockStorageProfiles()

for _, c := range cases {
var objects []client.Object

providerModeSpec := &api.StorageClusterSpec{
AllowRemoteStorageConsumers: c.remoteStorageConsumers,
ProviderAPIServerServiceType: "",
}

t, reconcilerOCSInit, cr, requestOCSInit, requestsStorageProfiles := initStorageClusterResourceCreateUpdateTestProviderMode(
t, objects, providerModeSpec, spList, c.remoteStorageConsumers)
if c.createRuntimeObjects {
objects = createUpdateRuntimeObjects(t) //nolint:staticcheck //no need to use objects as they update in runtime
}
if c.remoteStorageConsumers {
assertCephFileSystemProviderMode(t, reconcilerOCSInit, cr, requestOCSInit, requestsStorageProfiles)
} else {
assertCephFileSystem(t, reconcilerOCSInit, cr, requestOCSInit)
}
}
}

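// assertCephFileSystemProviderMode checks that in provider mode the
// CephFilesystem carries one data pool per usable StorageProfile and that the
// blank-device-class profile is marked Rejected.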
func assertCephFileSystemProviderMode(t *testing.T, reconciler StorageClusterReconciler, cr *api.StorageCluster, requestOCSInit reconcile.Request, requestsStorageProfiles []reconcile.Request) {
actualFs := &cephv1.CephFilesystem{
ObjectMeta: metav1.ObjectMeta{
Name: "ocsinit-cephfilesystem",
},
Spec: cephv1.FilesystemSpec{
DataPools: []cephv1.NamedPoolSpec{
{Name: "fast", PoolSpec: cephv1.PoolSpec{DeviceClass: "fast"}},
{Name: "med", PoolSpec: cephv1.PoolSpec{DeviceClass: "med"}},
{Name: "slow", PoolSpec: cephv1.PoolSpec{DeviceClass: "slow"}},
},
},
}
requestOCSInit.Name = "ocsinit-cephfilesystem"
err := reconciler.Client.Get(context.TODO(), requestOCSInit.NamespacedName, actualFs)
assert.NoError(t, err)

storageProfiles := &api.StorageProfileList{}
err = reconciler.Client.List(context.TODO(), storageProfiles)
assert.NoError(t, err)
assert.Equal(t, len(storageProfiles.Items), len(requestsStorageProfiles))
assert.Equal(t, len(storageProfiles.Items)-1, len(actualFs.Spec.DataPools))

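// Regenerating the expected CephFilesystem from the CR should match what the
// reconciler actually created.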
expectedCephFS, err := reconciler.newCephFilesystemInstances(cr)
assert.NoError(t, err)

assert.Equal(t, 1, len(expectedCephFS[0].OwnerReferences))

assert.Equal(t, expectedCephFS[0].ObjectMeta.Name, actualFs.ObjectMeta.Name)
assert.Equal(t, expectedCephFS[0].Spec, actualFs.Spec)
assert.Equal(t, expectedCephFS[0].Spec.DataPools[0].Name, actualFs.Spec.DataPools[0].Name)
assert.Equal(t, expectedCephFS[0].Spec.DataPools[1].Name, actualFs.Spec.DataPools[1].Name)
assert.Equal(t, expectedCephFS[0].Spec.DataPools[2].Name, actualFs.Spec.DataPools[2].Name)
assert.Equal(t, expectedCephFS[0].Spec.DataPools[0].PoolSpec.DeviceClass, actualFs.Spec.DataPools[0].PoolSpec.DeviceClass)
assert.Equal(t, expectedCephFS[0].Spec.DataPools[1].PoolSpec.DeviceClass, actualFs.Spec.DataPools[1].PoolSpec.DeviceClass)
assert.Equal(t, expectedCephFS[0].Spec.DataPools[2].PoolSpec.DeviceClass, actualFs.Spec.DataPools[2].PoolSpec.DeviceClass)

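// Each reconciled profile keeps an empty phase unless its name marks it as
// blank-device-class, in which case it must be Rejected.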
for i := range requestsStorageProfiles {
actualStorageProfile := &api.StorageProfile{}
requestStorageProfile := requestsStorageProfiles[i]
err = reconciler.Client.Get(context.TODO(), requestStorageProfile.NamespacedName, actualStorageProfile)
assert.NoError(t, err)
assert.Equal(t, requestStorageProfile.Name, actualStorageProfile.Name)

phaseStorageProfile := api.StorageProfilePhase("")
if strings.Contains(requestStorageProfile.Name, "blank") {
phaseStorageProfile = api.StorageProfilePhaseRejected
}
assert.Equal(t, phaseStorageProfile, actualStorageProfile.Status.Phase)
}
}

func assertCephFileSystem(t *testing.T, reconciler StorageClusterReconciler, cr *api.StorageCluster, request reconcile.Request) {
Expand Down