Commit 6b1a306

raaizik authored and jarrpa committed
SP unit tests
- CephFS tests added for cases of provider and non-provider modes
- storageclassrequests tests added

Signed-off-by: raaizik <[email protected]>
Co-authored-by: Jose A. Rivera <[email protected]>
1 parent 6e7dad4 commit 6b1a306

File tree

4 files changed
+551 -8 lines changed

controllers/storageclassrequest/storageclassrequest_controller_test.go

Lines changed: 238 additions & 0 deletions
@@ -16,6 +16,7 @@ package storageclassrequest
 import (
 	"context"
 	"fmt"
+	"strings"
 	"testing"

 	v1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
@@ -33,6 +34,9 @@ import (
 )

 const (
+	pgAutoscaleMode = "pg_autoscale_mode"
+	pgNum           = "pg_num"
+	pgpNum          = "pgp_num"
 	namespaceName      = "test-ns"
 	deviceClass        = "ssd"
 	storageProfileKind = "StorageProfile"
@@ -49,6 +53,46 @@ var fakeStorageProfile = &v1.StorageProfile{
 	},
 }

+var validStorageProfile = &v1.StorageProfile{
+	TypeMeta: metav1.TypeMeta{Kind: storageProfileKind},
+	ObjectMeta: metav1.ObjectMeta{
+		Name:      "valid",
+		Namespace: namespaceName,
+	},
+	Spec: v1.StorageProfileSpec{
+		DeviceClass: deviceClass,
+		BlockPoolConfiguration: v1.BlockPoolConfigurationSpec{
+			Parameters: map[string]string{
+				pgAutoscaleMode: "on",
+				pgNum:           "128",
+				pgpNum:          "128",
+			},
+		},
+	},
+	Status: v1.StorageProfileStatus{Phase: ""},
+}
+
+// A rejected StorageProfile is one that is invalid due to having a blank
+// device class field, and is set to Rejected in its phase.
+var rejectedStorageProfile = &v1.StorageProfile{
+	TypeMeta: metav1.TypeMeta{Kind: storageProfileKind},
+	ObjectMeta: metav1.ObjectMeta{
+		Name:      "rejected",
+		Namespace: namespaceName,
+	},
+	Spec: v1.StorageProfileSpec{
+		DeviceClass: "",
+		BlockPoolConfiguration: v1.BlockPoolConfigurationSpec{
+			Parameters: map[string]string{
+				pgAutoscaleMode: "on",
+				pgNum:           "128",
+				pgpNum:          "128",
+			},
+		},
+	},
+	Status: v1.StorageProfileStatus{Phase: ""},
+}
+
 var fakeStorageCluster = &v1.StorageCluster{
 	ObjectMeta: metav1.ObjectMeta{
 		Name: "test-storagecluster",
@@ -225,6 +269,200 @@ func TestProfileReconcile(t *testing.T) {
 	assert.NoError(t, err, caseLabel)
 }

+func TestStorageProfileCephBlockPool(t *testing.T) {
+	var err error
+	var caseCounter int
+
+	var primaryTestCases = []struct {
+		label            string
+		expectedPoolName string
+		failureExpected  bool
+		createObjects    []runtime.Object
+		storageProfile   *v1.StorageProfile
+	}{
+		{
+			label:            "valid profile",
+			expectedPoolName: "test-valid-blockpool",
+			failureExpected:  false,
+			storageProfile:   validStorageProfile,
+			createObjects: []runtime.Object{
+				&rookCephv1.CephBlockPool{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-valid-blockpool",
+						Namespace: namespaceName,
+						Labels: map[string]string{
+							controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
+							controllers.StorageProfileSpecLabel:  validStorageProfile.GetSpecHash(),
+						},
+					}, Spec: rookCephv1.NamedBlockPoolSpec{
+						Name: "spec",
+						PoolSpec: rookCephv1.PoolSpec{
+							FailureDomain: "zone",
+							DeviceClass:   deviceClass,
+							Parameters:    map[string]string{},
+						},
+					},
+				},
+			},
+		},
+		{
+			label:            "rejected profile",
+			expectedPoolName: "test-rejected-blockpool",
+			failureExpected:  true,
+			storageProfile:   rejectedStorageProfile,
+			createObjects: []runtime.Object{
+				&rookCephv1.CephBlockPool{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-rejected-blockpool",
+						Namespace: namespaceName,
+						Labels: map[string]string{
+							controllers.StorageConsumerNameLabel: fakeStorageConsumer.Name,
+							controllers.StorageProfileSpecLabel:  rejectedStorageProfile.GetSpecHash(),
+						},
+					}, Spec: rookCephv1.NamedBlockPoolSpec{
+						Name: "spec",
+						PoolSpec: rookCephv1.PoolSpec{
+							FailureDomain: "zone",
+							DeviceClass:   deviceClass,
+							Parameters:    map[string]string{},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for _, c := range primaryTestCases {
+		caseCounter++
+		caseLabel := fmt.Sprintf("Case %d: %s", caseCounter, c.label)
+		fmt.Println(caseLabel)
+
+		r := createFakeReconciler(t)
+		r.storageCluster.Spec.DefaultStorageProfile = c.storageProfile.Name
+		r.StorageClassRequest.Spec.Type = "blockpool"
+
+		r.StorageClassRequest.Spec.StorageProfile = c.storageProfile.Name
+
+		c.createObjects = append(c.createObjects, c.storageProfile)
+		c.createObjects = append(c.createObjects, fakeStorageConsumer)
+
+		fakeClient := fake.NewClientBuilder().WithScheme(r.Scheme).WithRuntimeObjects(c.createObjects...)
+		r.Client = fakeClient.Build()
+
+		_, err = r.reconcilePhases()
+		if c.failureExpected {
+			assert.Error(t, err, caseLabel)
+			continue
+		}
+		assert.NoError(t, err, caseLabel)
+
+		assert.Equal(t, c.expectedPoolName, r.cephBlockPool.Name, caseLabel)
+
+		if strings.Contains(c.expectedPoolName, "valid") {
+			expectedStorageProfileParameters := validStorageProfile.Spec.BlockPoolConfiguration.Parameters
+			actualBlockPoolParameters := r.cephBlockPool.Spec.Parameters
+			assert.Equal(t, expectedStorageProfileParameters, actualBlockPoolParameters, caseLabel)
+			assert.NotEqual(t, v1.StorageProfilePhaseRejected, c.storageProfile.Status.Phase)
+		} else {
+			actualBlockPoolParameters := r.cephBlockPool.Spec.Parameters
+			assert.Equal(t, v1.StorageProfilePhaseRejected, c.storageProfile.Status.Phase)
+			assert.Nil(t, actualBlockPoolParameters, caseLabel)
+		}
+	}
+
+}
+
+func TestStorageProfileCephFsSubVolGroup(t *testing.T) {
+	var err error
+	var caseCounter int
+
+	var primaryTestCases = []struct {
+		label             string
+		expectedGroupName string
+		failureExpected   bool
+		createObjects     []runtime.Object
+		cephResources     []*v1alpha1.CephResourcesSpec
+		storageProfile    *v1.StorageProfile
+		cephFs            *rookCephv1.CephFilesystem
+	}{
+		{
+			label:             "valid profile",
+			expectedGroupName: "test-subvolgroup",
+			storageProfile:    fakeStorageProfile,
+			cephFs:            fakeCephFs,
+			failureExpected:   false,
+			cephResources: []*v1alpha1.CephResourcesSpec{
+				{
+					Name: "test-subvolgroup",
+					Kind: "CephFilesystemSubVolumeGroup",
+				},
+			},
+			createObjects: []runtime.Object{
+				&rookCephv1.CephFilesystemSubVolumeGroup{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-subvolgroup",
+						Namespace: namespaceName,
+					},
+					Status: &rookCephv1.CephFilesystemSubVolumeGroupStatus{},
+				},
+			},
+		},
+		{
+			label:             "rejected profile",
+			expectedGroupName: "test-subvolgroup",
+			storageProfile:    rejectedStorageProfile,
+			cephFs:            fakeCephFs,
+			failureExpected:   true,
+			cephResources: []*v1alpha1.CephResourcesSpec{
+				{
+					Name: "test-subvolgroup",
+					Kind: "CephFilesystemSubVolumeGroup",
+				},
+			},
+			createObjects: []runtime.Object{
+				&rookCephv1.CephFilesystemSubVolumeGroup{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-subvolgroup",
+						Namespace: namespaceName,
+					},
+					Status: &rookCephv1.CephFilesystemSubVolumeGroupStatus{},
+				},
+			},
+		},
+	}
+
+	for _, c := range primaryTestCases {
+		caseCounter++
+		caseLabel := fmt.Sprintf("Case %d: %s", caseCounter, c.label)
+		fmt.Println(caseLabel)
+
+		r := createFakeReconciler(t)
+		if strings.Contains(c.label, "rejected") {
+			r.storageCluster.Spec.DefaultStorageProfile = rejectedStorageProfile.Name
+		}
+
+		r.StorageClassRequest.Status.CephResources = c.cephResources
+		r.StorageClassRequest.Spec.Type = "sharedfilesystem"
+		r.StorageClassRequest.Spec.StorageProfile = c.storageProfile.Name
+
+		c.createObjects = append(c.createObjects, c.cephFs)
+		c.createObjects = append(c.createObjects, c.storageProfile)
+		c.createObjects = append(c.createObjects, fakeStorageConsumer)
+		fakeClient := fake.NewClientBuilder().WithScheme(r.Scheme).WithRuntimeObjects(c.createObjects...)

+		r.Client = fakeClient.Build()
+		r.StorageClassRequest.Status.CephResources = c.cephResources
+
+		_, err = r.reconcilePhases()
+		if c.failureExpected {
+			assert.Error(t, err, caseLabel)
+			continue
+		}
+		assert.NoError(t, err, caseLabel)
+		assert.Equal(t, c.expectedGroupName, r.cephFilesystemSubVolumeGroup.Name, caseLabel)
+	}
+}
+
 func TestCephBlockPool(t *testing.T) {
 	var err error
 	var caseCounter int
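Both new tests follow the same pattern as the existing ones in this file: seed a controller-runtime fake client with each case's createObjects, run reconcilePhases, then assert on the reconciler's state. A minimal, self-contained sketch of that seeding pattern, using a core ConfigMap rather than the OCS types purely for illustration:

	package main

	import (
		"context"
		"fmt"

		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/runtime"
		clientgoscheme "k8s.io/client-go/kubernetes/scheme"
		"sigs.k8s.io/controller-runtime/pkg/client"
		"sigs.k8s.io/controller-runtime/pkg/client/fake"
	)

	func main() {
		scheme := runtime.NewScheme()
		_ = clientgoscheme.AddToScheme(scheme)

		cm := &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "test-ns"},
			Data:       map[string]string{"k": "v"},
		}

		// Pre-load the fake client with existing objects, mirroring the
		// createObjects slices in the tests above.
		c := fake.NewClientBuilder().
			WithScheme(scheme).
			WithRuntimeObjects(cm).
			Build()

		// Reads now behave as if the object already existed in the cluster.
		got := &corev1.ConfigMap{}
		err := c.Get(context.TODO(), client.ObjectKey{Name: "demo", Namespace: "test-ns"}, got)
		fmt.Println(err, got.Data["k"])
	}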

controllers/storagecluster/cephfilesystem_test.go

Lines changed: 79 additions & 6 deletions
@@ -2,6 +2,7 @@ package storagecluster

 import (
 	"context"
+	"strings"
 	"testing"

 	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
@@ -17,23 +18,95 @@

 func TestCephFileSystem(t *testing.T) {
 	var cases = []struct {
-		label                string
-		createRuntimeObjects bool
+		label                  string
+		createRuntimeObjects   bool
+		remoteStorageConsumers bool
 	}{
 		{
-			label:                "case 1",
-			createRuntimeObjects: false,
+			label:                  "Not in provider mode",
+			createRuntimeObjects:   false,
+			remoteStorageConsumers: false,
+		},
+		{
+			label:                  "In provider mode",
+			createRuntimeObjects:   false,
+			remoteStorageConsumers: true,
 		},
 	}
+	spList := getMockStorageProfiles()
+
 	for _, c := range cases {
 		var objects []client.Object
-		t, reconciler, cr, request := initStorageClusterResourceCreateUpdateTest(t, objects, nil)
+
+		providerModeSpec := &api.StorageClusterSpec{
+			AllowRemoteStorageConsumers:  c.remoteStorageConsumers,
+			ProviderAPIServerServiceType: "",
+		}
+
+		t, reconcilerOCSInit, cr, requestOCSInit, requestsStorageProfiles := initStorageClusterResourceCreateUpdateTestProviderMode(
+			t, objects, providerModeSpec, spList, c.remoteStorageConsumers)
 		if c.createRuntimeObjects {
 			objects = createUpdateRuntimeObjects(t) //nolint:staticcheck //no need to use objects as they update in runtime
 		}
-		assertCephFileSystem(t, reconciler, cr, request)
+		if c.remoteStorageConsumers {
+			assertCephFileSystemProviderMode(t, reconcilerOCSInit, cr, requestOCSInit, requestsStorageProfiles)
+		} else {
+			assertCephFileSystem(t, reconcilerOCSInit, cr, requestOCSInit)
+		}
+
+	}
+}
+
+func assertCephFileSystemProviderMode(t *testing.T, reconciler StorageClusterReconciler, cr *api.StorageCluster, requestOCSInit reconcile.Request, requestsStorageProfiles []reconcile.Request) {
+	actualFs := &cephv1.CephFilesystem{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "ocsinit-cephfilesystem",
+		},
+		Spec: cephv1.FilesystemSpec{
+			DataPools: []cephv1.NamedPoolSpec{
+				{Name: "fast", PoolSpec: cephv1.PoolSpec{DeviceClass: "fast"}},
+				{Name: "med", PoolSpec: cephv1.PoolSpec{DeviceClass: "med"}},
+				{Name: "slow", PoolSpec: cephv1.PoolSpec{DeviceClass: "slow"}},
+			},
+		},
 	}
+	requestOCSInit.Name = "ocsinit-cephfilesystem"
+	err := reconciler.Client.Get(context.TODO(), requestOCSInit.NamespacedName, actualFs)
+	assert.NoError(t, err)

+	storageProfiles := &api.StorageProfileList{}
+	err = reconciler.Client.List(context.TODO(), storageProfiles)
+	assert.NoError(t, err)
+	assert.Equal(t, len(storageProfiles.Items), len(requestsStorageProfiles))
+	assert.Equal(t, len(storageProfiles.Items)-1, len(actualFs.Spec.DataPools))
+
+	expectedCephFS, err := reconciler.newCephFilesystemInstances(cr)
+	assert.NoError(t, err)
+
+	assert.Equal(t, len(expectedCephFS[0].OwnerReferences), 1)
+
+	assert.Equal(t, expectedCephFS[0].ObjectMeta.Name, actualFs.ObjectMeta.Name)
+	assert.Equal(t, expectedCephFS[0].Spec, actualFs.Spec)
+	assert.Equal(t, expectedCephFS[0].Spec.DataPools[0].Name, actualFs.Spec.DataPools[0].Name)
+	assert.Equal(t, expectedCephFS[0].Spec.DataPools[1].Name, actualFs.Spec.DataPools[1].Name)
+	assert.Equal(t, expectedCephFS[0].Spec.DataPools[2].Name, actualFs.Spec.DataPools[2].Name)
+	assert.Equal(t, expectedCephFS[0].Spec.DataPools[0].PoolSpec.DeviceClass, actualFs.Spec.DataPools[0].PoolSpec.DeviceClass)
+	assert.Equal(t, expectedCephFS[0].Spec.DataPools[1].PoolSpec.DeviceClass, actualFs.Spec.DataPools[1].PoolSpec.DeviceClass)
+	assert.Equal(t, expectedCephFS[0].Spec.DataPools[2].PoolSpec.DeviceClass, actualFs.Spec.DataPools[2].PoolSpec.DeviceClass)
+
+	for i := range requestsStorageProfiles {
+		actualStorageProfile := &api.StorageProfile{}
+		requestStorageProfile := requestsStorageProfiles[i]
+		err = reconciler.Client.Get(context.TODO(), requestStorageProfile.NamespacedName, actualStorageProfile)
+		assert.NoError(t, err)
+		assert.Equal(t, requestStorageProfile.Name, actualStorageProfile.Name)
+
+		phaseStorageProfile := api.StorageProfilePhase("")
+		if strings.Contains(requestStorageProfile.Name, "blank") {
+			phaseStorageProfile = api.StorageProfilePhaseRejected
+		}
+		assert.Equal(t, phaseStorageProfile, actualStorageProfile.Status.Phase)
+	}
 }

 func assertCephFileSystem(t *testing.T, reconciler StorageClusterReconciler, cr *api.StorageCluster, request reconcile.Request) {
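Note: these tests label cases via fmt.Println and a shared case counter. An alternative idiomatic structure, shown here only as a sketch and not part of the commit, uses t.Run subtests, which name each case in test output and allow targeting a single case with go test -run:

	package example

	import "testing"

	func TestCases(t *testing.T) {
		cases := []struct {
			label           string
			failureExpected bool
		}{
			{label: "valid profile", failureExpected: false},
			{label: "rejected profile", failureExpected: true},
		}
		for _, c := range cases {
			c := c // capture range variable for the subtest closure
			t.Run(c.label, func(t *testing.T) {
				// Case setup and assertions would go here; a failure is
				// reported under "TestCases/valid_profile" etc.
				_ = c.failureExpected
			})
		}
	}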
