diff --git a/config/rbac/oauth_proxy_role.yaml b/config/rbac/oauth_proxy_role.yaml
index 7925b8b3bb..31a4b2f506 100644
--- a/config/rbac/oauth_proxy_role.yaml
+++ b/config/rbac/oauth_proxy_role.yaml
@@ -11,3 +11,9 @@ rules:
   resources:
   - subjectaccessreviews
   verbs: ["create"]
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - create
diff --git a/controllers/storagecluster/storageclasses.go b/controllers/storagecluster/storageclasses.go
index 8fedd34b52..c708130a25 100644
--- a/controllers/storagecluster/storageclasses.go
+++ b/controllers/storagecluster/storageclasses.go
@@ -30,8 +30,8 @@ const (
 )
 
 var (
-	rbdDriverName    = storageclassDriverNamePrefix + ".rbd.csi.ceph.com"
-	cephFSDriverName = storageclassDriverNamePrefix + ".cephfs.csi.ceph.com"
+	RbdDriverName    = storageclassDriverNamePrefix + ".rbd.csi.ceph.com"
+	CephFSDriverName = storageclassDriverNamePrefix + ".cephfs.csi.ceph.com"
 	nfsDriverName    = storageclassDriverNamePrefix + ".nfs.csi.ceph.com"
 	obcDriverName    = storageclassDriverNamePrefix + ".ceph.rook.io/bucket"
 )
@@ -108,7 +108,7 @@ func (r *StorageClusterReconciler) createStorageClasses(sccs []StorageClassConfi
 	sc := scc.storageClass
 
 	switch {
-	case (strings.Contains(sc.Name, "-ceph-rbd") || (strings.Contains(sc.Provisioner, rbdDriverName)) && !strings.Contains(sc.Name, "-ceph-non-resilient-rbd")) && !scc.isClusterExternal:
+	case (strings.Contains(sc.Name, "-ceph-rbd") || (strings.Contains(sc.Provisioner, RbdDriverName)) && !strings.Contains(sc.Name, "-ceph-non-resilient-rbd")) && !scc.isClusterExternal:
 		// wait for CephBlockPool to be ready
 		cephBlockPool := cephv1.CephBlockPool{}
 		key := types.NamespacedName{Name: sc.Parameters["pool"], Namespace: namespace}
@@ -170,7 +170,7 @@ func (r *StorageClusterReconciler) createStorageClasses(sccs []StorageClassConfi
 			skippedSC = append(skippedSC, sc.Name)
 			continue
 		}
-	case (strings.Contains(sc.Name, "-cephfs") || strings.Contains(sc.Provisioner, cephFSDriverName)) && !scc.isClusterExternal:
+	case (strings.Contains(sc.Name, "-cephfs") || strings.Contains(sc.Provisioner, CephFSDriverName)) && !scc.isClusterExternal:
 		// wait for CephFilesystem to be ready
 		cephFilesystem := cephv1.CephFilesystem{}
 		key := types.NamespacedName{Name: sc.Parameters["fsName"], Namespace: namespace}
@@ -254,7 +254,7 @@ func newCephFilesystemStorageClassConfiguration(initData *ocsv1.StorageCluster)
 				"description": "Provides RWO and RWX Filesystem volumes",
 			},
 		},
-		Provisioner:   cephFSDriverName,
+		Provisioner:   CephFSDriverName,
 		ReclaimPolicy: &persistentVolumeReclaimDelete,
 		// AllowVolumeExpansion is set to true to enable expansion of OCS backed Volumes
 		AllowVolumeExpansion: &allowVolumeExpansion,
@@ -289,7 +289,7 @@ func newCephBlockPoolStorageClassConfiguration(initData *ocsv1.StorageCluster) S
 				"reclaimspace.csiaddons.openshift.io/schedule": "@weekly",
 			},
 		},
-		Provisioner:   rbdDriverName,
+		Provisioner:   RbdDriverName,
 		ReclaimPolicy: &persistentVolumeReclaimDelete,
 		// AllowVolumeExpansion is set to true to enable expansion of OCS backed Volumes
 		AllowVolumeExpansion: &allowVolumeExpansion,
@@ -345,7 +345,7 @@ func newNonResilientCephBlockPoolStorageClassConfiguration(initData *ocsv1.Stora
 				"reclaimspace.csiaddons.openshift.io/schedule": "@weekly",
 			},
 		},
-		Provisioner:       rbdDriverName,
+		Provisioner:       RbdDriverName,
 		ReclaimPolicy:     &persistentVolumeReclaimDelete,
 		VolumeBindingMode: &volumeBindingWaitForFirstConsumer,
 		// AllowVolumeExpansion is set to true to enable expansion of OCS backed Volumes
diff --git a/deploy/csv-templates/ocs-operator.csv.yaml.in b/deploy/csv-templates/ocs-operator.csv.yaml.in
index e9bf5cc09c..34a4148d4b 100644
--- a/deploy/csv-templates/ocs-operator.csv.yaml.in
+++ b/deploy/csv-templates/ocs-operator.csv.yaml.in
@@ -515,6 +515,12 @@ spec:
           - subjectaccessreviews
           verbs:
          - create
+        - apiGroups:
+          - storage.k8s.io
+          resources:
+          - storageclasses
+          verbs:
+          - create
         serviceAccountName: ux-backend-server
       deployments:
       - name: ocs-operator
diff --git a/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml b/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml
index 4c34a84c73..33c736da7a 100644
--- a/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml
+++ b/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml
@@ -524,6 +524,12 @@ spec:
           - subjectaccessreviews
           verbs:
           - create
+        - apiGroups:
+          - storage.k8s.io
+          resources:
+          - storageclasses
+          verbs:
+          - create
         serviceAccountName: ux-backend-server
      deployments:
       - name: ocs-operator
diff --git a/deploy/ocs-operator/manifests/ux_backend_role.yaml b/deploy/ocs-operator/manifests/ux_backend_role.yaml
index f334ad0e76..cd5f7d2252 100644
--- a/deploy/ocs-operator/manifests/ux_backend_role.yaml
+++ b/deploy/ocs-operator/manifests/ux_backend_role.yaml
@@ -21,3 +21,10 @@ rules:
   verbs:
   - get
   - list
+  - update
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - cephblockpools
+  verbs:
+  - create
diff --git a/rbac/ux_backend_role.yaml b/rbac/ux_backend_role.yaml
index f334ad0e76..cd5f7d2252 100644
--- a/rbac/ux_backend_role.yaml
+++ b/rbac/ux_backend_role.yaml
@@ -21,3 +21,10 @@ rules:
   verbs:
   - get
   - list
+  - update
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - cephblockpools
+  verbs:
+  - create
diff --git a/services/ux-backend/handlers/expandstorage/handler.go b/services/ux-backend/handlers/expandstorage/handler.go
new file mode 100644
index 0000000000..9af2804e6b
--- /dev/null
+++ b/services/ux-backend/handlers/expandstorage/handler.go
@@ -0,0 +1,298 @@
+package expandstorage
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+
+	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
+	storagecluster "github.com/red-hat-storage/ocs-operator/v4/controllers/storagecluster"
+	"github.com/red-hat-storage/ocs-operator/v4/services/ux-backend/handlers"
+	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+	corev1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func HandleMessage(w http.ResponseWriter, r *http.Request, client client.Client, namespace string) {
+	switch r.Method {
+	case "POST":
+		handlePost(w, r, client, namespace)
+	default:
+		handleUnsupportedMethod(w, r)
+	}
+}
+
+func handlePost(w http.ResponseWriter, r *http.Request, client client.Client, namespace string) {
+	// When ContentLength is 0, the request body is empty.
+	var err error
+	if r.ContentLength == 0 {
+		klog.Errorf("body in the request is required")
+		http.Error(w, "body in the request is required", http.StatusBadRequest)
+		return
+	}
+
+	type poolDetails struct {
+		VolumeType           string `json:"volumeType"`
+		PoolName             string `json:"poolName"`
+		DataProtectionPolicy int    `json:"dataProtectionPolicy"`
+		EnableCompression    bool   `json:"enableCompression"`
+		FilesystemName       string `json:"filesystemName"`
+		FailureDomain        string `json:"failureDomain"`
+	}
+
+	type storageClassDetails struct {
+		ReclaimPolicy                string `json:"reclaimPolicy"`
+		Name                         string `json:"name"`
+		VolumeBindingMode            string `json:"volumeBindingMode"`
+		EnableStorageClassEncryption bool   `json:"enableStorageClassEncryption"`
+		EncryptionKMSID              string `json:"encryptionKMSID"`
+	}
+
+	var ExpandStorage = struct {
+		StorageClassForOSDs string              `json:"storageClassForOSDs"`
+		EnableEncryption    bool                `json:"enableEncryption"`
+		Storage             string              `json:"storage"`
+		Replica             int                 `json:"replica"`
+		Count               int                 `json:"count"`
+		StorageClusterName  string              `json:"storageClusterName"`
+		PoolDetails         poolDetails         `json:"poolDetails"`
+		StorageClassDetails storageClassDetails `json:"storageClassDetails"`
+	}{}
+
+	if err = json.NewDecoder(r.Body).Decode(&ExpandStorage); err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	storageCluster := &ocsv1.StorageCluster{}
+	err = client.Get(r.Context(), types.NamespacedName{Name: ExpandStorage.StorageClusterName, Namespace: namespace}, storageCluster)
+	if err != nil {
+		klog.Errorf("failed to get storageCluster: %v", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	// update storageCluster
+	updateStorageCluster(w, r, client, ExpandStorage.StorageClassForOSDs, ExpandStorage.EnableEncryption, ExpandStorage.Storage, ExpandStorage.Count, ExpandStorage.Replica, storageCluster)
+
+	if ExpandStorage.PoolDetails.VolumeType == "block" {
+		klog.Info("Creating cephBlockPool and storageClass")
+		// create cephBlockPool
+		createCephBlockPool(w, r, client, ExpandStorage.PoolDetails.PoolName, ExpandStorage.StorageClassForOSDs, ExpandStorage.PoolDetails.DataProtectionPolicy, ExpandStorage.PoolDetails.EnableCompression, namespace, ExpandStorage.PoolDetails.FailureDomain, storageCluster.Spec.Arbiter.Enable)
+
+		// create storageClass
+		createCephBlockPoolStorageClass(w, r, client, ExpandStorage.StorageClassDetails.Name, ExpandStorage.PoolDetails.PoolName, ExpandStorage.StorageClassDetails.ReclaimPolicy, ExpandStorage.StorageClassDetails.VolumeBindingMode, ExpandStorage.StorageClassDetails.EnableStorageClassEncryption, ExpandStorage.StorageClassDetails.EncryptionKMSID, namespace)
+	} else if ExpandStorage.PoolDetails.VolumeType == "filesystem" {
+		klog.Info("Creating cephFilesystem dataPool and storageClass")
+		// create cephFilesystem dataPool
+		createCephFilesystemDataPool(w, r, client, ExpandStorage.PoolDetails.PoolName, ExpandStorage.StorageClassForOSDs, ExpandStorage.PoolDetails.DataProtectionPolicy, ExpandStorage.PoolDetails.EnableCompression, ExpandStorage.PoolDetails.FailureDomain, storageCluster)
+
+		// create storageClass
+		createCephFilesystemStorageClass(w, r, client, ExpandStorage.StorageClassDetails.Name, ExpandStorage.PoolDetails.PoolName, ExpandStorage.StorageClassDetails.ReclaimPolicy, ExpandStorage.StorageClassDetails.VolumeBindingMode, ExpandStorage.PoolDetails.FilesystemName, namespace)
+	} else {
+		klog.Errorf("invalid volumeType: %s", ExpandStorage.PoolDetails.VolumeType)
+		http.Error(w, "invalid volumeType", http.StatusBadRequest)
+		return
+	}
+
+}
+
+func updateStorageCluster(w http.ResponseWriter, r *http.Request, client client.Client, storageClassForOSDs string, enableEncryption bool, storage string, count, replica int, storageCluster *ocsv1.StorageCluster) {
+	klog.Infof("Updating storageCluster %q", storageCluster.Name)
+	storageQty := resource.MustParse(storage)
+	volumeMode := corev1.PersistentVolumeBlock
+	deviceSet := ocsv1.StorageDeviceSet{
+		Name:        storageClassForOSDs,
+		Count:       count,
+		Replica:     replica,
+		Portable:    false,
+		Encrypted:   &enableEncryption,
+		DeviceClass: storageClassForOSDs,
+		DataPVCTemplate: corev1.PersistentVolumeClaim{
+			Spec: corev1.PersistentVolumeClaimSpec{
+				AccessModes:      []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
+				StorageClassName: &storageClassForOSDs,
+				VolumeMode:       &volumeMode,
+				Resources: corev1.VolumeResourceRequirements{
+					Requests: corev1.ResourceList{
+						corev1.ResourceStorage: storageQty,
+					},
+				},
+			},
+		},
+	}
+
+	storageCluster.Spec.StorageDeviceSets = append(storageCluster.Spec.StorageDeviceSets, deviceSet)
+	err := client.Update(r.Context(), storageCluster)
+	if err != nil {
+		klog.Errorf("failed to update storageCluster: %q", storageCluster.Name)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func createCephBlockPool(w http.ResponseWriter, r *http.Request, client client.Client, poolName, storageClassForOSDs string, dataProtectionPolicy int, enableCompression bool, namespace, failureDomain string, arbiter bool) {
+	compression := "none"
+	if enableCompression {
+		compression = "aggressive"
+	}
+	replicasPerFailureDomain := 1
+	if arbiter {
+		replicasPerFailureDomain = 2
+	}
+
+	cephBlockPool := &cephv1.CephBlockPool{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      poolName,
+			Namespace: namespace,
+		},
+		Spec: cephv1.NamedBlockPoolSpec{
+			PoolSpec: cephv1.PoolSpec{
+				FailureDomain:  failureDomain,
+				EnableRBDStats: true,
+				DeviceClass:    storageClassForOSDs,
+				Replicated: cephv1.ReplicatedSpec{
+					Size:                     uint(dataProtectionPolicy),
+					RequireSafeReplicaSize:   true,
+					ReplicasPerFailureDomain: uint(replicasPerFailureDomain),
+				},
+				Parameters: map[string]string{
+					"compression_mode": compression,
+				},
+				EnableCrushUpdates: true,
+			},
+		},
+	}
+
+	err := client.Create(r.Context(), cephBlockPool)
+	if err != nil {
+		klog.Errorf("failed to create cephBlockPool: %v", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func createCephBlockPoolStorageClass(w http.ResponseWriter, r *http.Request, client client.Client, storageClassName, poolName, reclaimPolicy, volumeBindingMode string, enableEncryption bool, encryptionKMSID, namespace string) {
+	allowVolumeExpansion := true
+	storageClass := &storagev1.StorageClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: storageClassName,
+			Annotations: map[string]string{
+				"description": "Provides RWO Filesystem volumes, and RWO and RWX Block volumes",
+				"reclaimspace.csiaddons.openshift.io/schedule": "@weekly",
+			},
+		},
+		Provisioner:       storagecluster.RbdDriverName,
+		ReclaimPolicy:     (*corev1.PersistentVolumeReclaimPolicy)(&reclaimPolicy),
+		VolumeBindingMode: (*storagev1.VolumeBindingMode)(&volumeBindingMode),
+		// AllowVolumeExpansion is set to true to enable expansion of OCS backed Volumes
+		AllowVolumeExpansion: &allowVolumeExpansion,
+		Parameters: map[string]string{
+			"clusterID":                 namespace,
+			"pool":                      poolName,
+			"imageFeatures":             "layering,deep-flatten,exclusive-lock,object-map,fast-diff",
+			"csi.storage.k8s.io/fstype": "ext4",
+			"imageFormat":               "2",
+			"encrypted":                 strconv.FormatBool(enableEncryption),
+			"csi.storage.k8s.io/provisioner-secret-name":            "rook-csi-rbd-provisioner",
+			"csi.storage.k8s.io/provisioner-secret-namespace":       namespace,
+			"csi.storage.k8s.io/node-stage-secret-name":             "rook-csi-rbd-node",
+			"csi.storage.k8s.io/node-stage-secret-namespace":        namespace,
+			"csi.storage.k8s.io/controller-expand-secret-name":      "rook-csi-rbd-provisioner",
+			"csi.storage.k8s.io/controller-expand-secret-namespace": namespace,
+		},
+	}
+	if enableEncryption {
+		storageClass.Parameters["encryptionKMSID"] = encryptionKMSID
+	}
+
+	err := client.Create(r.Context(), storageClass)
+	if err != nil {
+		klog.Errorf("failed to create storageClass: %v", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func createCephFilesystemDataPool(w http.ResponseWriter, r *http.Request, client client.Client, poolName, storageClassForOSDs string, dataProtectionPolicy int, enableCompression bool, failureDomain string, storageCluster *ocsv1.StorageCluster) {
+	compression := "none"
+	if enableCompression {
+		compression = "aggressive"
+	}
+
+	datapool := cephv1.NamedPoolSpec{
+		Name: poolName,
+		PoolSpec: cephv1.PoolSpec{
+			FailureDomain: failureDomain,
+			DeviceClass:   storageClassForOSDs,
+			Replicated: cephv1.ReplicatedSpec{
+				Size:                   uint(dataProtectionPolicy),
+				RequireSafeReplicaSize: true,
+			},
+			Parameters: map[string]string{
+				"compression_mode": compression,
+			},
+			EnableCrushUpdates: true,
+		},
+	}
+
+	storageCluster.Spec.ManagedResources.CephFilesystems.AdditionalDataPools = append(storageCluster.Spec.ManagedResources.CephFilesystems.AdditionalDataPools, datapool)
+	err := client.Update(r.Context(), storageCluster)
+	if err != nil {
+		klog.Errorf("failed to update storageCluster: %q", storageCluster.Name)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func createCephFilesystemStorageClass(w http.ResponseWriter, r *http.Request, client client.Client, storageClassName, poolName, reclaimPolicy, volumeBindingMode string, filesystemName, namespace string) {
+	allowVolumeExpansion := true
+	storageClass := &storagev1.StorageClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: storageClassName,
+			Annotations: map[string]string{
+				"description": "Provides RWO and RWX Filesystem volumes",
+			},
+		},
+		Provisioner:       storagecluster.CephFSDriverName,
+		ReclaimPolicy:     (*corev1.PersistentVolumeReclaimPolicy)(&reclaimPolicy),
+		VolumeBindingMode: (*storagev1.VolumeBindingMode)(&volumeBindingMode),
+		// AllowVolumeExpansion is set to true to enable expansion of OCS backed Volumes
+		AllowVolumeExpansion: &allowVolumeExpansion,
+		Parameters: map[string]string{
+			"clusterID": namespace,
+			"fsName":    filesystemName,
+			"pool":      poolName,
+			"csi.storage.k8s.io/provisioner-secret-name":            "rook-csi-cephfs-provisioner",
+			"csi.storage.k8s.io/provisioner-secret-namespace":       namespace,
+			"csi.storage.k8s.io/node-stage-secret-name":             "rook-csi-cephfs-node",
+			"csi.storage.k8s.io/node-stage-secret-namespace":        namespace,
+			"csi.storage.k8s.io/controller-expand-secret-name":      "rook-csi-cephfs-provisioner",
+			"csi.storage.k8s.io/controller-expand-secret-namespace": namespace,
+		},
+	}
+
+	err := client.Create(r.Context(), storageClass)
+	if err != nil {
+		klog.Errorf("failed to create storageClass: %v", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func handleUnsupportedMethod(w http.ResponseWriter, r *http.Request) {
+	klog.Infof("Only POST method should be used to send data to this endpoint %s", r.URL.Path)
+	// Headers must be set before WriteHeader; headers written afterwards are silently dropped.
+	w.Header().Set("Content-Type", handlers.ContentTypeTextPlain)
+	w.Header().Set("Allow", "POST")
+	w.WriteHeader(http.StatusMethodNotAllowed)
+
+	if _, err := w.Write([]byte(fmt.Sprintf("Unsupported method : %s", r.Method))); err != nil {
+		klog.Errorf("failed to write data to response writer: %v", err)
+	}
+}
diff --git a/services/ux-backend/main.go b/services/ux-backend/main.go
index 42abc96137..555d39dc2c 100644
--- a/services/ux-backend/main.go
+++ b/services/ux-backend/main.go
@@ -9,8 +9,11 @@
 import (
 	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
 	"github.com/red-hat-storage/ocs-operator/v4/controllers/util"
+	"github.com/red-hat-storage/ocs-operator/v4/services/ux-backend/handlers/expandstorage"
 	"github.com/red-hat-storage/ocs-operator/v4/services/ux-backend/handlers/onboarding/clienttokens"
 	"github.com/red-hat-storage/ocs-operator/v4/services/ux-backend/handlers/onboarding/peertokens"
+	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	corev1 "k8s.io/api/core/v1"
 
 	"k8s.io/apimachinery/pkg/runtime"
@@ -76,6 +79,12 @@ func main() {
 	if err := ocsv1.AddToScheme(scheme); err != nil {
 		klog.Exitf("failed to add ocsv1 to scheme. %v", err)
 	}
+	if err := cephv1.AddToScheme(scheme); err != nil {
+		klog.Exitf("failed to add cephv1 to scheme. %v", err)
+	}
+	if err := storagev1.AddToScheme(scheme); err != nil {
+		klog.Exitf("failed to add storagev1 to scheme. %v", err)
+	}
 
 	cl, err := util.NewK8sClient(scheme)
 	if err != nil {
@@ -96,6 +105,10 @@ func main() {
 		peertokens.HandleMessage(w, r, config.tokenLifetimeInHours, cl, namespace)
 	})
 
+	http.HandleFunc("/expandstorage", func(w http.ResponseWriter, r *http.Request) {
+		expandstorage.HandleMessage(w, r, cl, namespace)
+	})
+
 	klog.Info("ux backend server listening on port ", config.listenPort)
 	addr := fmt.Sprintf("%s%d", ":", config.listenPort)
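
For manually exercising the new /expandstorage route, a request like the sketch below drives the block-volume path. This is only a sketch: the field names mirror the JSON tags decoded in handlePost above, while the host/port, device class, sizes, and object names are illustrative assumptions, not values fixed by this patch.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Field names mirror the JSON tags decoded by handlePost; every value
	// below (port, names, sizes) is an illustrative assumption.
	payload := []byte(`{
	  "storageClusterName": "ocs-storagecluster",
	  "storageClassForOSDs": "gp3-csi",
	  "enableEncryption": false,
	  "storage": "512Gi",
	  "replica": 3,
	  "count": 1,
	  "poolDetails": {
	    "volumeType": "block",
	    "poolName": "expansion-pool",
	    "dataProtectionPolicy": 3,
	    "enableCompression": true,
	    "failureDomain": "zone"
	  },
	  "storageClassDetails": {
	    "name": "expansion-rbd-sc",
	    "reclaimPolicy": "Delete",
	    "volumeBindingMode": "WaitForFirstConsumer",
	    "enableStorageClassEncryption": false,
	    "encryptionKMSID": ""
	  }
	}`)

	// POST to the ux-backend; localhost:8080 is assumed here, the real
	// service listens on config.listenPort behind the oauth proxy.
	resp, err := http.Post("http://localhost:8080/expandstorage", "application/json", bytes.NewReader(payload))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}

Note that the handler casts reclaimPolicy and volumeBindingMode straight to the corev1/storagev1 types without validation, so callers must send exact enum values ("Delete"/"Retain", "Immediate"/"WaitForFirstConsumer"); anything else ends up verbatim in the created StorageClass.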