diff --git a/api/v1alpha1/drplacementcontrol_types.go b/api/v1alpha1/drplacementcontrol_types.go
index b4cb8d04a..ce7182d71 100644
--- a/api/v1alpha1/drplacementcontrol_types.go
+++ b/api/v1alpha1/drplacementcontrol_types.go
@@ -167,6 +167,16 @@ type PlacementDecision struct {
 	ClusterNamespace string `json:"clusterNamespace,omitempty"`
 }
 
+// ProtectedPVCsList defines a group of ProtectedPVCs
+type ProtectedPVCsList struct {
+	// Name of the consistency group
+	//+optional
+	Name string `json:"name,omitempty"`
+
+	// All the protected pvcs
+	ProtectedPVCs []string `json:"protectedpvcs,omitempty"`
+}
+
 // VRGResourceMeta represents the VRG resource.
 type VRGResourceMeta struct {
 	// Kind is the kind of the Kubernetes resource.
@@ -185,6 +195,10 @@ type VRGResourceMeta struct {
 	//+optional
 	ProtectedPVCs []string `json:"protectedpvcs,omitempty"`
 
+	// List of CGs that are protected by the VRG resource
+	//+optional
+	ProtectedCGs []ProtectedPVCsList `json:"protectedcgs,omitempty"`
+
 	// ResourceVersion is a value used to identify the version of the
 	// VRG resource object
 	//+optional
diff --git a/api/v1alpha1/volumereplicationgroup_types.go b/api/v1alpha1/volumereplicationgroup_types.go
index 0332a0275..059ef0739 100644
--- a/api/v1alpha1/volumereplicationgroup_types.go
+++ b/api/v1alpha1/volumereplicationgroup_types.go
@@ -307,6 +307,14 @@ type ProtectedPVC struct {
 	VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"`
 }
 
+// ProtectedCG defines a group of ProtectedPVCs
+type ProtectedCG struct {
+	ProtectedPVC `json:",inline"`
+
+	// List of the protected PVC names
+	PVCs []string `json:"protectedPVCs,omitempty"`
+}
+
 type KubeObjectsCaptureIdentifier struct {
 	Number int64 `json:"number"`
 	//+nullable
@@ -325,8 +333,9 @@ type KubeObjectProtectionStatus struct {
 type VolumeReplicationGroupStatus struct {
 	State State `json:"state,omitempty"`
 
-	// All the protected pvcs
+	// One of ProtectedPVCs or ProtectedCGs must be set
 	ProtectedPVCs []ProtectedPVC `json:"protectedPVCs,omitempty"`
+	ProtectedCGs  []ProtectedCG  `json:"protectedCGs,omitempty"`
 
 	// Conditions are the list of VRG's summary conditions and their status.
 	Conditions []metav1.Condition `json:"conditions,omitempty"`
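Taken together, the two type changes above split the VRG status by grouping mode: ungrouped PVCs keep one ProtectedPVC entry each, while a consistency group carries a single shared ProtectedPVC plus its member names. A minimal sketch of the new shape, using the types above with hypothetical names and values:

package main

import (
	"fmt"

	ramen "github.com/ramendr/ramen/api/v1alpha1"
)

func main() {
	status := ramen.VolumeReplicationGroupStatus{
		ProtectedCGs: []ramen.ProtectedCG{{
			// The inline ProtectedPVC carries the group-level fields:
			// conditions, sync counters, and storage identifiers.
			ProtectedPVC: ramen.ProtectedPVC{Namespace: "app-ns", Name: "cg-1"},
			// PVCs lists only the member names; they share the entry above.
			PVCs: []string{"data-0", "data-1"},
		}},
	}

	fmt.Println(len(status.ProtectedCGs), len(status.ProtectedCGs[0].PVCs)) // 1 2
}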
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 411337c47..12b7328e3 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -786,6 +786,27 @@ func (in *PlacementDecision) DeepCopy() *PlacementDecision {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProtectedCG) DeepCopyInto(out *ProtectedCG) {
+	*out = *in
+	in.ProtectedPVC.DeepCopyInto(&out.ProtectedPVC)
+	if in.PVCs != nil {
+		in, out := &in.PVCs, &out.PVCs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedCG.
+func (in *ProtectedCG) DeepCopy() *ProtectedCG {
+	if in == nil {
+		return nil
+	}
+	out := new(ProtectedCG)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ProtectedPVC) DeepCopyInto(out *ProtectedPVC) {
 	*out = *in
@@ -853,6 +874,26 @@ func (in *ProtectedPVC) DeepCopy() *ProtectedPVC {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProtectedPVCsList) DeepCopyInto(out *ProtectedPVCsList) {
+	*out = *in
+	if in.ProtectedPVCs != nil {
+		in, out := &in.ProtectedPVCs, &out.ProtectedPVCs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedPVCsList.
+func (in *ProtectedPVCsList) DeepCopy() *ProtectedPVCsList {
+	if in == nil {
+		return nil
+	}
+	out := new(ProtectedPVCsList)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ProtectedVolumeReplicationGroupList) DeepCopyInto(out *ProtectedVolumeReplicationGroupList) {
 	*out = *in
@@ -1434,6 +1475,13 @@ func (in *VRGResourceMeta) DeepCopyInto(out *VRGResourceMeta) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.ProtectedCGs != nil {
+		in, out := &in.ProtectedCGs, &out.ProtectedCGs
+		*out = make([]ProtectedPVCsList, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VRGResourceMeta.
@@ -1637,6 +1685,13 @@ func (in *VolumeReplicationGroupStatus) DeepCopyInto(out *VolumeReplicationGroup
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.ProtectedCGs != nil {
+		in, out := &in.ProtectedCGs, &out.ProtectedCGs
+		*out = make([]ProtectedCG, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
 		*out = make([]v1.Condition, len(*in))
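The generated deepcopy functions above matter because ProtectedPVCsList.ProtectedPVCs and ProtectedCG.PVCs are slices, so a plain struct copy would alias the backing array. A small sketch of the difference (hypothetical values):

package main

import (
	"fmt"

	ramen "github.com/ramendr/ramen/api/v1alpha1"
)

func main() {
	orig := ramen.ProtectedCG{PVCs: []string{"data-0"}}

	shallow := orig          // copies the slice header only
	deep := *orig.DeepCopy() // copies the backing array too

	shallow.PVCs[0] = "mutated"             // also visible through orig
	fmt.Println(orig.PVCs[0], deep.PVCs[0]) // mutated data-0
}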
diff --git a/config/crd/bases/ramendr.openshift.io_drplacementcontrols.yaml b/config/crd/bases/ramendr.openshift.io_drplacementcontrols.yaml
index 6b255d74f..b6c3c717c 100644
--- a/config/crd/bases/ramendr.openshift.io_drplacementcontrols.yaml
+++ b/config/crd/bases/ramendr.openshift.io_drplacementcontrols.yaml
@@ -535,6 +535,21 @@ spec:
                     description: Namespace is the namespace of the Kubernetes resource.
                     type: string
+                  protectedcgs:
+                    description: List of CGs that are protected by the VRG resource
+                    items:
+                      description: ProtectedPVCsList defines a group of ProtectedPVCs
+                      properties:
+                        name:
+                          description: Name of the consistency group
+                          type: string
+                        protectedpvcs:
+                          description: All the protected pvcs
+                          items:
+                            type: string
+                          type: array
+                      type: object
+                    type: array
                   protectedpvcs:
                     description: List of PVCs that are protected by the VRG resource
                     items:
diff --git a/config/crd/bases/ramendr.openshift.io_protectedvolumereplicationgrouplists.yaml b/config/crd/bases/ramendr.openshift.io_protectedvolumereplicationgrouplists.yaml
index 84cccf093..813096c8c 100644
--- a/config/crd/bases/ramendr.openshift.io_protectedvolumereplicationgrouplists.yaml
+++ b/config/crd/bases/ramendr.openshift.io_protectedvolumereplicationgrouplists.yaml
@@ -872,8 +872,235 @@ spec:
                   type: integer
                 prepareForFinalSyncComplete:
                   type: boolean
+                protectedCGs:
+                  items:
+                    description: ProtectedCG defines a group of ProtectedPVCs
+                    properties:
+                      accessModes:
+                        description: AccessModes set in the claim to be replicated
+                        items:
+                          type: string
+                        type: array
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: Annotations for the PVC
+                        type: object
+                      conditions:
+                        description: Conditions for this protected pvc
+                        items:
+                          description: "Condition contains details for one aspect
+                            of the current state of this API Resource.\n---\nThis
+                            struct is intended for direct use as an array at the
+                            field path .status.conditions. For example,\n\n\n\ttype
+                            FooStatus struct{\n\t    // Represents the observations
+                            of a foo's current state.\n\t    // Known .status.conditions.type
+                            are: \"Available\", \"Progressing\", and \"Degraded\"\n\t
+                            \   // +patchMergeKey=type\n\t    // +patchStrategy=merge\n\t
+                            \   // +listType=map\n\t    // +listMapKey=type\n\t
+                            \   Conditions []metav1.Condition `json:\"conditions,omitempty\"
+                            patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
+                            \   // other fields\n\t}"
+                          properties:
+                            lastTransitionTime:
+                              description: |-
+                                lastTransitionTime is the last time the condition transitioned from one status to another.
+                                This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                              format: date-time
+                              type: string
+                            message:
+                              description: |-
+                                message is a human readable message indicating details about the transition.
+                                This may be an empty string.
+                              maxLength: 32768
+                              type: string
+                            observedGeneration:
+                              description: |-
+                                observedGeneration represents the .metadata.generation that the condition was set based upon.
+                                For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                                with respect to the current state of the instance.
+                              format: int64
+                              minimum: 0
+                              type: integer
+                            reason:
+                              description: |-
+                                reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                                Producers of specific condition types may define expected values and meanings for this field,
+                                and whether the values are considered a guaranteed API.
+                                The value should be a CamelCase string.
+                                This field may not be empty.
+                              maxLength: 1024
+                              minLength: 1
+                              pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                              type: string
+                            status:
+                              description: status of the condition, one of True,
+                                False, Unknown.
+                              enum:
+                              - "True"
+                              - "False"
+                              - Unknown
+                              type: string
+                            type:
+                              description: |-
+                                type of condition in CamelCase or in foo.example.com/CamelCase.
+                                ---
+                                Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
+                                useful (see .node.status.conditions), the ability to deconflict is important.
+                                The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+                              maxLength: 316
+                              pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                              type: string
+                          required:
+                          - lastTransitionTime
+                          - message
+                          - reason
+                          - status
+                          - type
+                          type: object
+                        type: array
+                      csiProvisioner:
+                        description: |-
+                          StorageProvisioners contains the provisioner name of the CSI driver used to provision this
+                          PVC (extracted from the storageClass that was used for provisioning)
+                        type: string
+                      labels:
+                        additionalProperties:
+                          type: string
+                        description: Labels for the PVC
+                        type: object
+                      lastSyncBytes:
+                        description: Bytes transferred per sync, if protected in
+                          async mode only
+                        format: int64
+                        type: integer
+                      lastSyncDuration:
+                        description: |-
+                          Duration of recent synchronization for PVC, if
+                          protected in the async or volsync mode
+                        type: string
+                      lastSyncTime:
+                        description: |-
+                          Time of the most recent successful synchronization for the PVC, if
+                          protected in the async or volsync mode
+                        format: date-time
+                        type: string
+                      name:
+                        description: Name of the VolRep/PVC resource
+                        type: string
+                      namespace:
+                        description: Name of the namespace the PVC is in
+                        type: string
+                      protectedByVolSync:
+                        description: VolSyncPVC can be used to denote whether this
+                          PVC is protected by VolSync. Defaults to "false".
+                        type: boolean
+                      protectedPVCs:
+                        description: List of the protected PVC names
+                        items:
+                          type: string
+                        type: array
+                      replicationID:
+                        description: |-
+                          ReplicationID contains the globally unique replication identifier, as reported by the storage backend
+                          on the VolumeReplicationClass as the value for the label "ramendr.openshift.io/replicationid", that
+                          identifies the storage backends across 2 (or more) storage instances where the volume is replicated
+                          It also contains any maintenance modes that the replication backend requires during vaious Ramen actions
+                        properties:
+                          id:
+                            description: |-
+                              ID contains the globally unique storage identifier that identifies
+                              the storage or replication backend
+                            type: string
+                          modes:
+                            description: |-
+                              Modes is a list of maintenance modes that need to be activated on the storage
+                              backend, prior to various Ramen related orchestration. This is read from the label
+                              "ramendr.openshift.io/maintenancemodes" on the StorageClass or VolumeReplicationClass,
+                              the value for which is a comma separated list of maintenance modes.
+                            items:
+                              description: |-
+                                MMode defines a maintenance mode, that a storage backend may be requested to act on, based on the DR orchestration
+                                in progress for one or more workloads whose PVCs use the specific storage provisioner
+                              enum:
+                              - Failover
+                              type: string
+                            type: array
+                        required:
+                        - id
+                        type: object
+                      resources:
+                        description: Resources set in the claim to be replicated
+                        properties:
+                          limits:
+                            additionalProperties:
+                              anyOf:
+                              - type: integer
+                              - type: string
+                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                              x-kubernetes-int-or-string: true
+                            description: |-
+                              Limits describes the maximum amount of compute resources allowed.
+                              More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                            type: object
+                          requests:
+                            additionalProperties:
+                              anyOf:
+                              - type: integer
+                              - type: string
+                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                              x-kubernetes-int-or-string: true
+                            description: |-
+                              Requests describes the minimum amount of compute resources required.
+                              If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+                              otherwise to an implementation-defined value. Requests cannot exceed Limits.
+                              More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                            type: object
+                        type: object
+                      storageClassName:
+                        description: Name of the StorageClass required by the claim.
+                        type: string
+                      storageID:
+                        description: |-
+                          StorageID contains the globally unique storage identifier, as reported by the storage backend
+                          on the StorageClass as the value for the label "ramendr.openshift.io/storageid", that identifies
+                          the storage backend that was used to provision the volume. It is used to label different StorageClasses
+                          across different kubernetes clusters, that potentially share the same storage backend.
+                          It also contains any maintenance modes that the storage backend requires during vaious Ramen actions
+                        properties:
+                          id:
+                            description: |-
+                              ID contains the globally unique storage identifier that identifies
+                              the storage or replication backend
+                            type: string
+                          modes:
+                            description: |-
+                              Modes is a list of maintenance modes that need to be activated on the storage
+                              backend, prior to various Ramen related orchestration. This is read from the label
+                              "ramendr.openshift.io/maintenancemodes" on the StorageClass or VolumeReplicationClass,
+                              the value for which is a comma separated list of maintenance modes.
+                            items:
+                              description: |-
+                                MMode defines a maintenance mode, that a storage backend may be requested to act on, based on the DR orchestration
+                                in progress for one or more workloads whose PVCs use the specific storage provisioner
+                              enum:
+                              - Failover
+                              type: string
+                            type: array
+                        required:
+                        - id
+                        type: object
+                      volumeMode:
+                        description: VolumeMode describes how a volume is intended
+                          to be consumed, either Block or Filesystem.
+                        type: string
+                    type: object
+                  type: array
                 protectedPVCs:
-                  description: All the protected pvcs
+                  description: One of ProtectedPVCs or ProtectedCGs must be set
                   items:
                     properties:
                       accessModes:
diff --git a/config/crd/bases/ramendr.openshift.io_volumereplicationgroups.yaml b/config/crd/bases/ramendr.openshift.io_volumereplicationgroups.yaml
index 417d55fd8..3de5d55d7 100644
--- a/config/crd/bases/ramendr.openshift.io_volumereplicationgroups.yaml
+++ b/config/crd/bases/ramendr.openshift.io_volumereplicationgroups.yaml
@@ -808,8 +808,232 @@ spec:
             type: integer
           prepareForFinalSyncComplete:
             type: boolean
+          protectedCGs:
+            items:
+              description: ProtectedCG defines a group of ProtectedPVCs
+              properties:
+                accessModes:
+                  description: AccessModes set in the claim to be replicated
+                  items:
+                    type: string
+                  type: array
+                annotations:
+                  additionalProperties:
+                    type: string
+                  description: Annotations for the PVC
+                  type: object
+                conditions:
+                  description: Conditions for this protected pvc
+                  items:
+                    description: "Condition contains details for one aspect of the
+                      current state of this API Resource.\n---\nThis struct is intended
+                      for direct use as an array at the field path .status.conditions.
+                      For example,\n\n\n\ttype FooStatus struct{\n\t    // Represents
+                      the observations of a foo's current state.\n\t    // Known
+                      .status.conditions.type are: \"Available\", \"Progressing\",
+                      and \"Degraded\"\n\t    // +patchMergeKey=type\n\t    // +patchStrategy=merge\n\t
+                      \   // +listType=map\n\t    // +listMapKey=type\n\t    Conditions
+                      []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\"
+                      patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
+                      \   // other fields\n\t}"
+                    properties:
+                      lastTransitionTime:
+                        description: |-
+                          lastTransitionTime is the last time the condition transitioned from one status to another.
+                          This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                        format: date-time
+                        type: string
+                      message:
+                        description: |-
+                          message is a human readable message indicating details about the transition.
+                          This may be an empty string.
+                        maxLength: 32768
+                        type: string
+                      observedGeneration:
+                        description: |-
+                          observedGeneration represents the .metadata.generation that the condition was set based upon.
+                          For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                          with respect to the current state of the instance.
+                        format: int64
+                        minimum: 0
+                        type: integer
+                      reason:
+                        description: |-
+                          reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                          Producers of specific condition types may define expected values and meanings for this field,
+                          and whether the values are considered a guaranteed API.
+                          The value should be a CamelCase string.
+                          This field may not be empty.
+                        maxLength: 1024
+                        minLength: 1
+                        pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                        type: string
+                      status:
+                        description: status of the condition, one of True, False,
+                          Unknown.
+                        enum:
+                        - "True"
+                        - "False"
+                        - Unknown
+                        type: string
+                      type:
+                        description: |-
+                          type of condition in CamelCase or in foo.example.com/CamelCase.
+                          ---
+                          Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
+                          useful (see .node.status.conditions), the ability to deconflict is important.
+                          The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+                        maxLength: 316
+                        pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                        type: string
+                    required:
+                    - lastTransitionTime
+                    - message
+                    - reason
+                    - status
+                    - type
+                    type: object
+                  type: array
+                csiProvisioner:
+                  description: |-
+                    StorageProvisioners contains the provisioner name of the CSI driver used to provision this
+                    PVC (extracted from the storageClass that was used for provisioning)
+                  type: string
+                labels:
+                  additionalProperties:
+                    type: string
+                  description: Labels for the PVC
+                  type: object
+                lastSyncBytes:
+                  description: Bytes transferred per sync, if protected in async
+                    mode only
+                  format: int64
+                  type: integer
+                lastSyncDuration:
+                  description: |-
+                    Duration of recent synchronization for PVC, if
+                    protected in the async or volsync mode
+                  type: string
+                lastSyncTime:
+                  description: |-
+                    Time of the most recent successful synchronization for the PVC, if
+                    protected in the async or volsync mode
+                  format: date-time
+                  type: string
+                name:
+                  description: Name of the VolRep/PVC resource
+                  type: string
+                namespace:
+                  description: Name of the namespace the PVC is in
+                  type: string
+                protectedByVolSync:
+                  description: VolSyncPVC can be used to denote whether this PVC
+                    is protected by VolSync. Defaults to "false".
+                  type: boolean
+                protectedPVCs:
+                  description: List of the protected PVC names
+                  items:
+                    type: string
+                  type: array
+                replicationID:
+                  description: |-
+                    ReplicationID contains the globally unique replication identifier, as reported by the storage backend
+                    on the VolumeReplicationClass as the value for the label "ramendr.openshift.io/replicationid", that
+                    identifies the storage backends across 2 (or more) storage instances where the volume is replicated
+                    It also contains any maintenance modes that the replication backend requires during vaious Ramen actions
+                  properties:
+                    id:
+                      description: |-
+                        ID contains the globally unique storage identifier that identifies
+                        the storage or replication backend
+                      type: string
+                    modes:
+                      description: |-
+                        Modes is a list of maintenance modes that need to be activated on the storage
+                        backend, prior to various Ramen related orchestration. This is read from the label
+                        "ramendr.openshift.io/maintenancemodes" on the StorageClass or VolumeReplicationClass,
+                        the value for which is a comma separated list of maintenance modes.
+                      items:
+                        description: |-
+                          MMode defines a maintenance mode, that a storage backend may be requested to act on, based on the DR orchestration
+                          in progress for one or more workloads whose PVCs use the specific storage provisioner
+                        enum:
+                        - Failover
+                        type: string
+                      type: array
+                  required:
+                  - id
+                  type: object
+                resources:
+                  description: Resources set in the claim to be replicated
+                  properties:
+                    limits:
+                      additionalProperties:
+                        anyOf:
+                        - type: integer
+                        - type: string
+                        pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                        x-kubernetes-int-or-string: true
+                      description: |-
+                        Limits describes the maximum amount of compute resources allowed.
+                        More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                      type: object
+                    requests:
+                      additionalProperties:
+                        anyOf:
+                        - type: integer
+                        - type: string
+                        pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                        x-kubernetes-int-or-string: true
+                      description: |-
+                        Requests describes the minimum amount of compute resources required.
+                        If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+                        otherwise to an implementation-defined value. Requests cannot exceed Limits.
+                        More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                      type: object
+                  type: object
+                storageClassName:
+                  description: Name of the StorageClass required by the claim.
+                  type: string
+                storageID:
+                  description: |-
+                    StorageID contains the globally unique storage identifier, as reported by the storage backend
+                    on the StorageClass as the value for the label "ramendr.openshift.io/storageid", that identifies
+                    the storage backend that was used to provision the volume. It is used to label different StorageClasses
+                    across different kubernetes clusters, that potentially share the same storage backend.
+                    It also contains any maintenance modes that the storage backend requires during vaious Ramen actions
+                  properties:
+                    id:
+                      description: |-
+                        ID contains the globally unique storage identifier that identifies
+                        the storage or replication backend
+                      type: string
+                    modes:
+                      description: |-
+                        Modes is a list of maintenance modes that need to be activated on the storage
+                        backend, prior to various Ramen related orchestration. This is read from the label
+                        "ramendr.openshift.io/maintenancemodes" on the StorageClass or VolumeReplicationClass,
+                        the value for which is a comma separated list of maintenance modes.
+                      items:
+                        description: |-
+                          MMode defines a maintenance mode, that a storage backend may be requested to act on, based on the DR orchestration
+                          in progress for one or more workloads whose PVCs use the specific storage provisioner
+                        enum:
+                        - Failover
+                        type: string
+                      type: array
+                  required:
+                  - id
+                  type: object
+                volumeMode:
+                  description: VolumeMode describes how a volume is intended to
+                    be consumed, either Block or Filesystem.
+                  type: string
+              type: object
+            type: array
           protectedPVCs:
-            description: All the protected pvcs
+            description: One of ProtectedPVCs or ProtectedCGs must be set
             items:
               properties:
                 accessModes:
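The controller changes that follow all branch on the same gate: when the VRG carries the consistency-group annotation (checked by util.IsCGEnabled, whose implementation is outside this diff), status readers iterate Status.ProtectedCGs instead of Status.ProtectedPVCs. A condensed sketch of the accessor pattern; statusEntries is a hypothetical name for illustration, and the patch itself adds GetProtectedPVCs with this behavior in vrg_status_pvcs.go below:

package controllers

import (
	ramen "github.com/ramendr/ramen/api/v1alpha1"
	"github.com/ramendr/ramen/internal/controller/util"
)

func statusEntries(vrg *ramen.VolumeReplicationGroup) []ramen.ProtectedPVC {
	if !util.IsCGEnabled(vrg.GetAnnotations()) {
		return vrg.Status.ProtectedPVCs
	}

	entries := make([]ramen.ProtectedPVC, 0, len(vrg.Status.ProtectedCGs))
	for _, cg := range vrg.Status.ProtectedCGs {
		entries = append(entries, cg.ProtectedPVC) // one shared entry per group
	}

	return entries
}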
diff --git a/internal/controller/drplacementcontrol.go b/internal/controller/drplacementcontrol.go
index 5c9c57715..79b1698d8 100644
--- a/internal/controller/drplacementcontrol.go
+++ b/internal/controller/drplacementcontrol.go
@@ -640,7 +640,8 @@ func requiresRegionalFailoverPrerequisites(
 		}
 	}
 
-	for _, protectedPVC := range vrg.Status.ProtectedPVCs {
+	protectedPVCs := GetProtectedPVCs(vrg)
+	for _, protectedPVC := range protectedPVCs {
 		if len(protectedPVC.StorageIdentifiers.ReplicationID.Modes) == 0 {
 			continue
 		}
@@ -1622,13 +1623,14 @@ func updatePeers(
 ) []rmn.PeerClass {
 	peerClasses := vrgPeerClasses
 
-	for pvcIdx := range vrgFromView.Status.ProtectedPVCs {
+	protectedPVCs := GetProtectedPVCs(vrgFromView)
+	for pvcIdx := range protectedPVCs {
 		for policyPeerClassIdx := range policyPeerClasses {
 			if policyPeerClasses[policyPeerClassIdx].StorageClassName ==
-				*vrgFromView.Status.ProtectedPVCs[pvcIdx].StorageClassName {
+				*protectedPVCs[pvcIdx].StorageClassName {
 				if hasPeerClass(
 					vrgPeerClasses,
-					*vrgFromView.Status.ProtectedPVCs[pvcIdx].StorageClassName,
+					*protectedPVCs[pvcIdx].StorageClassName,
 					policyPeerClasses[policyPeerClassIdx].ClusterIDs,
 				) {
 					break
diff --git a/internal/controller/drplacementcontrol_controller.go b/internal/controller/drplacementcontrol_controller.go
index 77ceecdcb..c5e3519d2 100644
--- a/internal/controller/drplacementcontrol_controller.go
+++ b/internal/controller/drplacementcontrol_controller.go
@@ -1278,7 +1278,7 @@ func (r *DRPlacementControlReconciler) updateDRPCStatus(
 // - The status update is NOT intended for a VRG that should be cleaned up on a peer cluster
 // It also updates DRPC ConditionProtected based on current state of VRG.
 //
-//nolint:funlen
+//nolint:funlen,gocognit,cyclop
 func (r *DRPlacementControlReconciler) updateResourceCondition(
 	ctx context.Context, drpc *rmn.DRPlacementControl, userPlacement client.Object,
 ) {
@@ -1334,12 +1334,32 @@ func (r *DRPlacementControlReconciler) updateResourceCondition(
 	drpc.Status.ResourceConditions.ResourceMeta.ResourceVersion = vrg.ResourceVersion
 	drpc.Status.ResourceConditions.Conditions = vrg.Status.Conditions
 
-	protectedPVCs := []string{}
-	for _, protectedPVC := range vrg.Status.ProtectedPVCs {
-		protectedPVCs = append(protectedPVCs, protectedPVC.Name)
-	}
+	if !rmnutil.IsCGEnabled(vrg.GetAnnotations()) {
+		protectedPVCs := []string{}
+		for _, protectedPVC := range vrg.Status.ProtectedPVCs {
+			protectedPVCs = append(protectedPVCs, protectedPVC.Name)
+		}
+
+		drpc.Status.ResourceConditions.ResourceMeta.ProtectedPVCs = protectedPVCs
+	} else {
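+		// Summarize each consistency group as its name plus its member PVC names.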
+		protectedCGs := []rmn.ProtectedPVCsList{}
+		for index := range vrg.Status.ProtectedCGs {
+			protectedCG := vrg.Status.ProtectedCGs[index]
+
+			protectedcg := rmn.ProtectedPVCsList{
+				Name:          protectedCG.Name,
+				ProtectedPVCs: []string{},
+			}
+
+			if len(protectedCG.PVCs) != 0 {
+				protectedcg.ProtectedPVCs = append(protectedcg.ProtectedPVCs, protectedCG.PVCs...)
+			}
 
-	drpc.Status.ResourceConditions.ResourceMeta.ProtectedPVCs = protectedPVCs
+			protectedCGs = append(protectedCGs, protectedcg)
+		}
+
+		drpc.Status.ResourceConditions.ResourceMeta.ProtectedCGs = protectedCGs
+	}
 
 	if vrg.Status.LastGroupSyncTime != nil || drpc.Spec.Action != rmn.ActionRelocate {
 		drpc.Status.LastGroupSyncTime = vrg.Status.LastGroupSyncTime
diff --git a/internal/controller/volumereplicationgroup_controller.go b/internal/controller/volumereplicationgroup_controller.go
index 58c11e3fe..b925805ae 100644
--- a/internal/controller/volumereplicationgroup_controller.go
+++ b/internal/controller/volumereplicationgroup_controller.go
@@ -452,9 +452,16 @@ func (r *VolumeReplicationGroupReconciler) Reconcile(ctx context.Context, req ct
 		v.instance.Spec.Async, cephFSCSIDriverNameOrDefault(v.ramenConfig),
 		volSyncDestinationCopyMethodOrDefault(v.ramenConfig), adminNamespaceVRG)
 
-	if v.instance.Status.ProtectedPVCs == nil {
-		v.instance.Status.ProtectedPVCs = []ramendrv1alpha1.ProtectedPVC{}
+	if !util.IsCGEnabled(v.instance.GetAnnotations()) {
+		if v.instance.Status.ProtectedPVCs == nil {
+			v.instance.Status.ProtectedPVCs = []ramendrv1alpha1.ProtectedPVC{}
+		}
+	} else {
+		if v.instance.Status.ProtectedCGs == nil {
+			v.instance.Status.ProtectedCGs = []ramendrv1alpha1.ProtectedCG{}
+		}
 	}
+
 	// Save a copy of the instance status to be used for the VRG status update comparison
 	v.instance.Status.DeepCopyInto(&v.savedInstanceStatus)
 	v.vrgStatusPvcNamespacesSetIfUnset()
@@ -813,18 +820,36 @@ func (v *VRGInstance) separatePVCsUsingVRGStatus(pvcList *corev1.PersistentVolum
 	for idx := range pvcList.Items {
 		pvc := &pvcList.Items[idx]
 
-		for _, protectedPVC := range v.instance.Status.ProtectedPVCs {
-			if pvc.Name == protectedPVC.Name && pvc.Namespace == protectedPVC.Namespace {
-				if protectedPVC.ProtectedByVolSync {
-					v.volSyncPVCs = append(v.volSyncPVCs, *pvc)
-				} else {
-					v.volRepPVCs = append(v.volRepPVCs, *pvc)
+		if !util.IsCGEnabled(v.instance.GetAnnotations()) {
+			for index := range v.instance.Status.ProtectedPVCs {
+				protectedPVC := &v.instance.Status.ProtectedPVCs[index]
+
+				v.separatePVC(protectedPVC, pvc, protectedPVC.Name)
+			}
+		} else {
+			for i := range v.instance.Status.ProtectedCGs {
+				protectedCG := &v.instance.Status.ProtectedCGs[i]
+
+				for index := range protectedCG.PVCs {
+					v.separatePVC(&protectedCG.ProtectedPVC, pvc, protectedCG.PVCs[index])
 				}
 			}
 		}
 	}
 }
 
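+// separatePVC adds pvc to the volSync or volRep working set when its
+// namespace and name match the given protected PVC status entry.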
+func (v *VRGInstance) separatePVC(protectedPVC *ramendrv1alpha1.ProtectedPVC,
+	pvc *corev1.PersistentVolumeClaim, name string,
+) {
+	if pvc.Name == name && pvc.Namespace == protectedPVC.Namespace {
+		if protectedPVC.ProtectedByVolSync {
+			v.volSyncPVCs = append(v.volSyncPVCs, *pvc)
+		} else {
+			v.volRepPVCs = append(v.volRepPVCs, *pvc)
+		}
+	}
+}
+
 //nolint:gocognit, nestif
 func (v *VRGInstance) validateSyncPVCs(pvcList *corev1.PersistentVolumeClaimList) error {
 	peerClasses := v.instance.Spec.Sync.PeerClasses
@@ -903,7 +928,7 @@ func (v *VRGInstance) separatePVCUsingPeerClassAndSC(peerClasses []ramendrv1alph
 	if peerClass == nil {
 		msg := fmt.Sprintf("peerClass matching storageClass %s not found for async PVC", storageClass.GetName())
 
-		v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonPeerClassNotFound, msg)
+		v.updatePVCDataReadyCondition(pvc, VRGConditionReasonPeerClassNotFound, msg)
 
 		return fmt.Errorf(msg)
 	}
@@ -1072,7 +1097,7 @@ func (v *VRGInstance) findPeerClassMatchingSC(
 	if peerClass == nil {
 		msg := fmt.Sprintf("peerClass matching storageClass %s not found for PVC", storageClass.GetName())
 
-		v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonPeerClassNotFound, msg)
+		v.updatePVCDataReadyCondition(pvc, VRGConditionReasonPeerClassNotFound, msg)
 
 		return nil, fmt.Errorf(msg)
 	}
@@ -1319,6 +1344,7 @@ func (v *VRGInstance) pvcsDeselectedUnprotect() error {
 	return nil
 }
 
+//nolint:gocognit,cyclop,nestif
 func (v *VRGInstance) cleanupProtectedPVCs(
 	pvcsVr, pvcsVs map[client.ObjectKey]corev1.PersistentVolumeClaim, log logr.Logger,
 ) {
@@ -1334,25 +1360,51 @@ func (v *VRGInstance) cleanupProtectedPVCs(
 		return
 	}
 	// clean up the PVCs that are part of protected pvcs but not in v.volReps and v.volSyncs
-	protectedPVCsFiltered := make([]ramendrv1alpha1.ProtectedPVC, 0)
+	if !util.IsCGEnabled(v.instance.GetAnnotations()) {
+		// clean up the PVCs that are part of protected pvcs but not in v.volReps and v.volSyncs
+		protectedPVCsFiltered := make([]ramendrv1alpha1.ProtectedPVC, 0)
 
-	for _, protectedPVC := range v.instance.Status.ProtectedPVCs {
-		pvcNamespacedName := client.ObjectKey{Namespace: protectedPVC.Namespace, Name: protectedPVC.Name}
+		for _, protectedPVC := range v.instance.Status.ProtectedPVCs {
+			pvcNamespacedName := client.ObjectKey{Namespace: protectedPVC.Namespace, Name: protectedPVC.Name}
 
-		if _, ok := pvcsVr[pvcNamespacedName]; ok {
-			protectedPVCsFiltered = append(protectedPVCsFiltered, protectedPVC)
+			if _, ok := pvcsVr[pvcNamespacedName]; ok {
+				protectedPVCsFiltered = append(protectedPVCsFiltered, protectedPVC)
 
-			continue
+				continue
+			}
+
+			if _, ok := pvcsVs[pvcNamespacedName]; ok {
+				protectedPVCsFiltered = append(protectedPVCsFiltered, protectedPVC)
+
+				continue
+			}
 		}
 
-		if _, ok := pvcsVs[pvcNamespacedName]; ok {
-			protectedPVCsFiltered = append(protectedPVCsFiltered, protectedPVC)
+		v.instance.Status.ProtectedPVCs = protectedPVCsFiltered
+	} else {
+		for i := range v.instance.Status.ProtectedCGs {
+			protectedCG := &v.instance.Status.ProtectedCGs[i]
 
-			continue
+			// clean up the PVCs that are part of protected pvcs but not in v.volReps and v.volSyncs
+			protectedPVCsFiltered := make([]string, 0)
+
+			for _, protectedPVC := range protectedCG.PVCs {
+				pvcNamespacedName := client.ObjectKey{Namespace: protectedCG.Namespace, Name: protectedPVC}
+
+				if _, ok := pvcsVr[pvcNamespacedName]; ok {
+					protectedPVCsFiltered = append(protectedPVCsFiltered, protectedPVC)
+
+					continue
+				}
+
+				if _, ok := pvcsVs[pvcNamespacedName]; ok {
+					protectedPVCsFiltered = append(protectedPVCsFiltered, protectedPVC)
+
+					continue
+				}
+			}
+
+			protectedCG.PVCs = protectedPVCsFiltered
 		}
 	}
-
-	v.instance.Status.ProtectedPVCs = protectedPVCsFiltered
 }
 
 // processAsSecondary reconciles the current instance of VRG as secondary
@@ -1639,10 +1691,38 @@ func (v *VRGInstance) vrgReadyStatus(reason string) *metav1.Condition {
 	return newVRGAsPrimaryReadyCondition(v.instance.Generation, reason, msg)
 }
 
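+// getProtectedPVC returns the status entry to update for pvc: the shared
+// ProtectedPVC of its consistency group when the PVC carries the
+// consistency-group label and CG support is enabled (creating the group
+// entry and registering the PVC in it as needed), otherwise the per-PVC
+// entry, created if missing.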
+func (v *VRGInstance) getProtectedPVC(pvc *corev1.PersistentVolumeClaim) *ramendrv1alpha1.ProtectedPVC {
+	var protectedPVC *ramendrv1alpha1.ProtectedPVC
+
+	cg, ok := pvc.GetLabels()[ConsistencyGroupLabel]
+	if ok && util.IsCGEnabled(v.instance.GetAnnotations()) {
+		cgName := cg + v.instance.Name
+
+		protectedCG := v.findProtectedCG(cgName, pvc.GetNamespace())
+		if protectedCG == nil {
+			protectedCG = v.addProtectedCG(cgName, pvc.GetNamespace())
+		}
+
+		if !v.protectedPVCExistsInCG(protectedCG, pvc.GetNamespace(), pvc.GetName()) {
+			v.addProtectedPVCToCG(protectedCG, pvc.GetName())
+		}
+
+		protectedPVC = &protectedCG.ProtectedPVC
+	} else {
+		protectedPVC = v.findProtectedPVC(pvc.GetNamespace(), pvc.GetName())
+		if protectedPVC == nil {
+			protectedPVC = v.addProtectedPVC(pvc.GetNamespace(), pvc.GetName())
+		}
+	}
+
+	return protectedPVC
+}
+
 func (v *VRGInstance) updateVRGLastGroupSyncTime() {
 	var leastLastSyncTime *metav1.Time
 
-	for _, protectedPVC := range v.instance.Status.ProtectedPVCs {
+	protectedPVCs := v.getProtectedPVCs()
+	for _, protectedPVC := range protectedPVCs {
 		// If any protected PVC reports nil, report that back (no sync time available)
 		if protectedPVC.LastSyncTime == nil {
 			leastLastSyncTime = nil
@@ -1667,7 +1747,8 @@ func (v *VRGInstance) updateVRGLastGroupSyncTime() {
 func (v *VRGInstance) updateVRGLastGroupSyncDuration() {
 	var maxLastSyncDuration *metav1.Duration
 
-	for _, protectedPVC := range v.instance.Status.ProtectedPVCs {
+	protectedPVCs := v.getProtectedPVCs()
+	for _, protectedPVC := range protectedPVCs {
 		if maxLastSyncDuration == nil && protectedPVC.LastSyncDuration != nil {
 			maxLastSyncDuration = new(metav1.Duration)
 			*maxLastSyncDuration = *protectedPVC.LastSyncDuration
@@ -1687,7 +1768,8 @@ func (v *VRGInstance) updateLastGroupSyncBytes() {
 	var totalLastSyncBytes *int64
 
-	for _, protectedPVC := range v.instance.Status.ProtectedPVCs {
+	protectedPVCs := v.getProtectedPVCs()
+	for _, protectedPVC := range protectedPVCs {
 		if totalLastSyncBytes == nil && protectedPVC.LastSyncBytes != nil {
 			totalLastSyncBytes = new(int64)
 			*totalLastSyncBytes = *protectedPVC.LastSyncBytes
diff --git a/internal/controller/vrg_status_pvcs.go b/internal/controller/vrg_status_pvcs.go
index 387a0fa0e..f61ff0bce 100644
--- a/internal/controller/vrg_status_pvcs.go
+++ b/internal/controller/vrg_status_pvcs.go
@@ -6,8 +6,26 @@ package controllers
 import (
 	"github.com/go-logr/logr"
 	ramen "github.com/ramendr/ramen/api/v1alpha1"
+	"github.com/ramendr/ramen/internal/controller/util"
 )
 
+func (v *VRGInstance) getProtectedPVCs() []ramen.ProtectedPVC {
+	return GetProtectedPVCs(v.instance)
+}
+
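+// GetProtectedPVCs flattens the VRG status into PVC-level entries: the
+// ProtectedPVCs list as-is when consistency groups are disabled, or one
+// shared ProtectedPVC per consistency group when they are enabled.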
+func GetProtectedPVCs(vrg *ramen.VolumeReplicationGroup) []ramen.ProtectedPVC {
+	protectedPVCs := []ramen.ProtectedPVC{}
+	if !util.IsCGEnabled(vrg.GetAnnotations()) {
+		protectedPVCs = vrg.Status.ProtectedPVCs
+	} else {
+		for _, protectedCG := range vrg.Status.ProtectedCGs {
+			protectedPVCs = append(protectedPVCs, protectedCG.ProtectedPVC)
+		}
+	}
+
+	return protectedPVCs
+}
+
 // findProtectedPVC returns the &VRG.Status.ProtectedPVC[x] for the given pvcName
 func (v *VRGInstance) findProtectedPVC(pvcNamespaceName, pvcName string) *ramen.ProtectedPVC {
 	return FindProtectedPVC(v.instance, pvcNamespaceName, pvcName)
@@ -19,6 +37,16 @@ func FindProtectedPVC(vrg *ramen.VolumeReplicationGroup, pvcNamespaceName, pvcNa
 	return protectedPvc
 }
 
+func FindProtectedCG(vrg *ramen.VolumeReplicationGroup, cgName, pvcNamespaceName string) *ramen.ProtectedPVC {
+	protectedCG, _ := FindProtectedCgAndIndex(vrg, cgName, pvcNamespaceName)
+
+	if protectedCG == nil {
+		return nil
+	}
+
+	return &protectedCG.ProtectedPVC
+}
+
 func (v *VRGInstance) addProtectedPVC(pvcNamespace, pvcName string) *ramen.ProtectedPVC {
 	protectedPVC := &ramen.ProtectedPVC{Namespace: pvcNamespace, Name: pvcName}
 
@@ -27,6 +55,47 @@ func (v *VRGInstance) addProtectedPVC(pvcNamespace, pvcName string) *ramen.Prote
 	return &v.instance.Status.ProtectedPVCs[len(v.instance.Status.ProtectedPVCs)-1]
 }
 
+func (v *VRGInstance) findProtectedCG(cgName, pvcNamespaceName string) *ramen.ProtectedCG {
+	for index := range v.instance.Status.ProtectedCGs {
+		protectedCG := &v.instance.Status.ProtectedCGs[index]
+		if protectedCG.Namespace == pvcNamespaceName && protectedCG.Name == cgName {
+			return protectedCG
+		}
+	}
+
+	return nil
+}
+
+func (v *VRGInstance) protectedPVCExistsInCG(protectedCG *ramen.ProtectedCG, pvcNamespaceName, pvcName string,
+) bool {
+	for index := range protectedCG.PVCs {
+		protectedPVC := protectedCG.PVCs[index]
+		if protectedCG.Namespace == pvcNamespaceName && protectedPVC == pvcName {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (v *VRGInstance) addProtectedCG(cgName, pvcNamespace string) *ramen.ProtectedCG {
+	protectedCG := &ramen.ProtectedCG{
+		ProtectedPVC: ramen.ProtectedPVC{
+			Namespace: pvcNamespace,
+			Name:      cgName,
+		},
+		PVCs: []string{},
+	}
+
+	v.instance.Status.ProtectedCGs = append(v.instance.Status.ProtectedCGs, *protectedCG)
+
+	return &v.instance.Status.ProtectedCGs[len(v.instance.Status.ProtectedCGs)-1]
+}
+
+func (v *VRGInstance) addProtectedPVCToCG(cg *ramen.ProtectedCG, pvcName string) {
+	cg.PVCs = append(cg.PVCs, pvcName)
+}
+
 func (v *VRGInstance) pvcStatusDeleteIfPresent(pvcNamespaceName, pvcName string, log logr.Logger) {
 	pvcStatus, i := FindProtectedPvcAndIndex(v.instance, pvcNamespaceName, pvcName)
 	if pvcStatus == nil {
@@ -45,6 +114,18 @@ func sliceUnorderedElementDelete[T any](s []T, i int) []T {
 	return s[:len(s)-1]
 }
 
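+// FindProtectedCgAndIndex returns the consistency-group entry matching the
+// given name and namespace along with its index, or nil and the length of
+// the list when no entry matches.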
v.getProtectedPVC(pvc) protectedPVC.ProtectedByVolSync = false protectedPVC.StorageClassName = pvc.Spec.StorageClassName @@ -354,7 +351,7 @@ func (v *VRGInstance) preparePVCForVRProtection(pvc *corev1.PersistentVolumeClai // Since pvc is skipped, mark the condition for the PVC as progressing. Even for // deletion this applies where if the VR protection finalizer is absent for pvc and // it is being deleted. - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonProgressing, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonProgressing, msg) return !requeue, skip } @@ -376,7 +373,7 @@ func (v *VRGInstance) protectPVC(pvc *corev1.PersistentVolumeClaim, log logr.Log case rmnutil.Different: msg := "PVC owned by another resource" log.Info(msg, "owner", rmnutil.OwnerNamespacedName(pvc).String()) - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonError, msg) return !requeue } @@ -388,7 +385,7 @@ func (v *VRGInstance) protectPVC(pvc *corev1.PersistentVolumeClaim, log logr.Log if err := v.reconciler.Update(v.ctx, pvc); err != nil { msg := "Failed to update PVC to add owner and/or Protected Finalizer" log1.Info(msg, "error", err) - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonError, msg) return requeue } @@ -400,7 +397,7 @@ func (v *VRGInstance) protectPVC(pvc *corev1.PersistentVolumeClaim, log logr.Log log.Info("Requeuing, as retaining PersistentVolume failed", "errorValue", err) msg := "Failed to retain PV for PVC" - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonError, msg) return requeue } @@ -411,7 +408,7 @@ func (v *VRGInstance) protectPVC(pvc *corev1.PersistentVolumeClaim, log logr.Log log.Info("Requeuing, as annotating PersistentVolumeClaim failed", "errorValue", err) msg := "Failed to add protected annotatation to PVC" - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonError, msg) return requeue } @@ -421,7 +418,7 @@ func (v *VRGInstance) protectPVC(pvc *corev1.PersistentVolumeClaim, log logr.Log log.Info("Requeuing, as adding label for consistency group failed", "errorValue", err) msg := "Failed to add label for consistency group to PVC" - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonError, msg) return requeue } @@ -615,8 +612,7 @@ func (v *VRGInstance) isArchivedAlready(pvc *corev1.PersistentVolumeClaim, log l func (v *VRGInstance) uploadPVandPVCtoS3Stores(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (err error) { if v.isArchivedAlready(pvc, log) { msg := fmt.Sprintf("PV cluster data already protected for PVC %s", pvc.Name) - v.updatePVCClusterDataProtectedCondition(pvc.Namespace, pvc.Name, - VRGConditionReasonUploaded, msg) + v.updatePVCClusterDataProtectedCondition(pvc, VRGConditionReasonUploaded, msg) return nil } @@ -625,8 +621,7 @@ func (v *VRGInstance) uploadPVandPVCtoS3Stores(pvc *corev1.PersistentVolumeClaim numProfilesToUpload := len(v.instance.Spec.S3Profiles) if numProfilesToUpload == 0 { msg := "Error uploading PV cluster data because VRG spec has no S3 profiles" - v.updatePVCClusterDataProtectedCondition(pvc.Namespace, pvc.Name, - VRGConditionReasonUploadError, msg) + 
v.updatePVCClusterDataProtectedCondition(pvc, VRGConditionReasonUploadError, msg) v.log.Info(msg) return fmt.Errorf("error uploading cluster data of PV %s because VRG spec has no S3 profiles", @@ -645,8 +640,7 @@ func (v *VRGInstance) uploadPVandPVCtoS3Stores(pvc *corev1.PersistentVolumeClaim msg := fmt.Sprintf("Uploaded PV/PVC cluster data to only %d of %d S3 profile(s): %v", numProfilesUploaded, numProfilesToUpload, s3Profiles) v.log.Info(msg) - v.updatePVCClusterDataProtectedCondition(pvc.Namespace, pvc.Name, - VRGConditionReasonUploadError, msg) + v.updatePVCClusterDataProtectedCondition(pvc, VRGConditionReasonUploadError, msg) return fmt.Errorf(msg) } @@ -655,8 +649,7 @@ func (v *VRGInstance) uploadPVandPVCtoS3Stores(pvc *corev1.PersistentVolumeClaim msg := fmt.Sprintf("failed to add archived annotation for PVC (%s/%s) with error (%v)", pvc.Namespace, pvc.Name, err) v.log.Info(msg) - v.updatePVCClusterDataProtectedCondition(pvc.Namespace, pvc.Name, - VRGConditionReasonClusterDataAnnotationFailed, msg) + v.updatePVCClusterDataProtectedCondition(pvc, VRGConditionReasonClusterDataAnnotationFailed, msg) return fmt.Errorf(msg) } @@ -664,8 +657,7 @@ func (v *VRGInstance) uploadPVandPVCtoS3Stores(pvc *corev1.PersistentVolumeClaim msg := fmt.Sprintf("Done uploading PV/PVC cluster data to %d of %d S3 profile(s): %v", numProfilesUploaded, numProfilesToUpload, s3Profiles) v.log.Info(msg) - v.updatePVCClusterDataProtectedCondition(pvc.Namespace, pvc.Name, - VRGConditionReasonUploaded, msg) + v.updatePVCClusterDataProtectedCondition(pvc, VRGConditionReasonUploaded, msg) return nil } @@ -727,7 +719,7 @@ func (v *VRGInstance) UploadPVandPVCtoS3Stores(pvc *corev1.PersistentVolumeClaim for _, s3ProfileName := range v.instance.Spec.S3Profiles { err := v.UploadPVandPVCtoS3Store(s3ProfileName, pvc) if err != nil { - v.updatePVCClusterDataProtectedCondition(pvc.Namespace, pvc.Name, VRGConditionReasonUploadError, err.Error()) + v.updatePVCClusterDataProtectedCondition(pvc, VRGConditionReasonUploadError, err.Error()) rmnutil.ReportIfNotPresent(v.reconciler.eventRecorder, v.instance, corev1.EventTypeWarning, rmnutil.EventReasonUploadFailed, err.Error()) @@ -1145,9 +1137,9 @@ func (v *VRGInstance) processVRAsPrimary(vrNamespacedName types.NamespacedName, // condition where both async and sync are enabled at the same time. if v.instance.Spec.Sync != nil { msg := "PVC in the VolumeReplicationGroup is ready for use" - v.updatePVCDataReadyCondition(vrNamespacedName.Namespace, vrNamespacedName.Name, VRGConditionReasonReady, msg) - v.updatePVCDataProtectedCondition(vrNamespacedName.Namespace, vrNamespacedName.Name, VRGConditionReasonReady, msg) - v.updatePVCLastSyncCounters(vrNamespacedName.Namespace, vrNamespacedName.Name, nil) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonReady, msg) + v.updatePVCDataProtectedCondition(pvc, VRGConditionReasonReady, msg) + v.updatePVCLastSyncCounters(pvc, nil) return false, true, nil } @@ -1177,10 +1169,10 @@ func (v *VRGInstance) processVRAsSecondary(vrNamespacedName types.NamespacedName // condition where both async and sync are enabled at the same time. 
if v.instance.Spec.Sync != nil { msg := "VolumeReplication resource for the pvc as Secondary is in sync with Primary" - v.updatePVCDataReadyCondition(vrNamespacedName.Namespace, vrNamespacedName.Name, VRGConditionReasonReplicated, msg) - v.updatePVCDataProtectedCondition(vrNamespacedName.Namespace, vrNamespacedName.Name, VRGConditionReasonDataProtected, + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonReplicated, msg) + v.updatePVCDataProtectedCondition(pvc, VRGConditionReasonDataProtected, msg) - v.updatePVCLastSyncCounters(vrNamespacedName.Namespace, vrNamespacedName.Name, nil) + v.updatePVCLastSyncCounters(pvc, nil) return false, true, nil } @@ -1216,7 +1208,7 @@ func (v *VRGInstance) createOrUpdateVR(vrNamespacedName types.NamespacedName, // is it replicating or not. So, mark the protected pvc as error // with condition.status as Unknown. msg := "Failed to get VolumeReplication resource" - v.updatePVCDataReadyCondition(vrNamespacedName.Namespace, vrNamespacedName.Name, VRGConditionReasonErrorUnknown, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonErrorUnknown, msg) return requeue, false, fmt.Errorf("failed to get VolumeReplication resource"+ " (%s/%s) belonging to VolumeReplicationGroup (%s/%s), %w", @@ -1230,7 +1222,7 @@ func (v *VRGInstance) createOrUpdateVR(vrNamespacedName types.NamespacedName, rmnutil.EventReasonVRCreateFailed, err.Error()) msg := "Failed to create VolumeReplication resource" - v.updatePVCDataReadyCondition(vrNamespacedName.Namespace, vrNamespacedName.Name, VRGConditionReasonError, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonError, msg) return requeue, false, fmt.Errorf("failed to create VolumeReplication resource"+ " (%s/%s) belonging to VolumeReplicationGroup (%s/%s), %w", @@ -1239,7 +1231,7 @@ func (v *VRGInstance) createOrUpdateVR(vrNamespacedName types.NamespacedName, // Just created VolRep. Mark status.conditions as Progressing. msg := "Created VolumeReplication resource for PVC" - v.updatePVCDataReadyCondition(vrNamespacedName.Namespace, vrNamespacedName.Name, VRGConditionReasonProgressing, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonProgressing, msg) return !requeue, false, nil } @@ -1306,7 +1298,7 @@ func (v *VRGInstance) updateVR(pvc *corev1.PersistentVolumeClaim, volRep client. rmnutil.EventReasonVRUpdateFailed, err.Error()) msg := "Failed to update VolumeReplication resource" - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonError, msg) return requeue, false, fmt.Errorf("failed to update VolumeReplication resource"+ " (%s/%s) as %s, belonging to VolumeReplicationGroup (%s/%s), %w", @@ -1318,7 +1310,7 @@ func (v *VRGInstance) updateVR(pvc *corev1.PersistentVolumeClaim, volRep client. volRep.GetName(), volRep.GetNamespace(), state)) // Just updated the state of the VolRep. Mark it as progressing. 
msg := "Updated VolumeReplication resource for PVC" - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonProgressing, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonProgressing, msg) return !requeue, false, nil } @@ -1564,7 +1556,7 @@ func (v *VRGInstance) checkVRStatus(pvc *corev1.PersistentVolumeClaim, volRep cl volRep.GetName(), volRep.GetNamespace())) msg := "VolumeReplication generation not updated in status" - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonProgressing, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonProgressing, msg) return false } @@ -1579,7 +1571,7 @@ func (v *VRGInstance) checkVRStatus(pvc *corev1.PersistentVolumeClaim, volRep cl string(v.instance.Spec.ReplicationState), v.instance.Name, v.instance.Namespace)) msg := "VolumeReplicationGroup state invalid" - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonError, msg) return false } @@ -1625,9 +1617,9 @@ func (v *VRGInstance) validateVRStatus(pvc *corev1.PersistentVolumeClaim, volRep } msg := "PVC in the VolumeReplicationGroup is ready for use" - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonReady, msg) - v.updatePVCDataProtectedCondition(pvc.Namespace, pvc.Name, VRGConditionReasonReady, msg) - v.updatePVCLastSyncCounters(pvc.Namespace, pvc.Name, status) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonReady, msg) + v.updatePVCDataProtectedCondition(pvc, VRGConditionReasonReady, msg) + v.updatePVCLastSyncCounters(pvc, status) v.log.Info(fmt.Sprintf("VolumeReplication resource %s/%s is ready for use", volRep.GetName(), volRep.GetNamespace())) @@ -1645,9 +1637,9 @@ func (v *VRGInstance) validateVRValidatedStatus( conditionMet, condState, errorMsg := isVRConditionMet(volRep, status, volrep.ConditionValidated, metav1.ConditionTrue) if !conditionMet && condState != conditionMissing { defaultMsg := "VolumeReplication resource not validated" - v.updatePVCDataReadyConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, errorMsg, + v.updatePVCDataReadyConditionHelper(pvc, VRGConditionReasonError, errorMsg, defaultMsg) - v.updatePVCDataProtectedConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, errorMsg, + v.updatePVCDataProtectedConditionHelper(pvc, VRGConditionReasonError, errorMsg, defaultMsg) v.log.Info(fmt.Sprintf("%s (VolRep: %s/%s)", defaultMsg, volRep.GetName(), volRep.GetNamespace())) } @@ -1678,9 +1670,9 @@ func (v *VRGInstance) validateVRCompletedStatus(pvc *corev1.PersistentVolumeClai conditionMet, _, msg := isVRConditionMet(volRep, status, volrep.ConditionCompleted, metav1.ConditionTrue) if !conditionMet { defaultMsg := fmt.Sprintf("VolumeReplication resource for pvc not %s to %s", action, stateString) - v.updatePVCDataReadyConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg, + v.updatePVCDataReadyConditionHelper(pvc, VRGConditionReasonError, msg, defaultMsg) - v.updatePVCDataProtectedConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg, + v.updatePVCDataProtectedConditionHelper(pvc, VRGConditionReasonError, msg, defaultMsg) v.log.Info(fmt.Sprintf("%s (VolRep: %s/%s)", defaultMsg, volRep.GetName(), volRep.GetNamespace())) @@ -1710,7 +1702,7 @@ func (v *VRGInstance) validateVRCompletedStatus(pvc *corev1.PersistentVolumeClai func (v *VRGInstance) validateAdditionalVRStatusForSecondary(pvc *corev1.PersistentVolumeClaim, volRep client.Object, status 
*volrep.VolumeReplicationStatus, ) bool { - v.updatePVCLastSyncCounters(pvc.Namespace, pvc.Name, nil) + v.updatePVCLastSyncCounters(pvc, nil) conditionMet, _, _ := isVRConditionMet(volRep, status, volrep.ConditionResyncing, metav1.ConditionTrue) if !conditionMet { @@ -1720,10 +1712,10 @@ func (v *VRGInstance) validateAdditionalVRStatusForSecondary(pvc *corev1.Persist conditionMet, _, msg := isVRConditionMet(volRep, status, volrep.ConditionDegraded, metav1.ConditionTrue) if !conditionMet { defaultMsg := "VolumeReplication resource for pvc is not in Degraded condition while resyncing" - v.updatePVCDataProtectedConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg, + v.updatePVCDataProtectedConditionHelper(pvc, VRGConditionReasonError, msg, defaultMsg) - v.updatePVCDataReadyConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg, + v.updatePVCDataReadyConditionHelper(pvc, VRGConditionReasonError, msg, defaultMsg) v.log.Info(fmt.Sprintf("VolumeReplication resource is not in degraded condition while"+ @@ -1733,8 +1725,8 @@ func (v *VRGInstance) validateAdditionalVRStatusForSecondary(pvc *corev1.Persist } msg = "VolumeReplication resource for the pvc is syncing as Secondary" - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonReplicating, msg) - v.updatePVCDataProtectedCondition(pvc.Namespace, pvc.Name, VRGConditionReasonReplicating, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonReplicating, msg) + v.updatePVCDataProtectedCondition(pvc, VRGConditionReasonReplicating, msg) v.log.Info(fmt.Sprintf("VolumeReplication resource for the pvc is syncing as Secondary (%s/%s)", volRep.GetName(), volRep.GetNamespace())) @@ -1749,10 +1741,10 @@ func (v *VRGInstance) checkResyncCompletionAsSecondary(pvc *corev1.PersistentVol conditionMet, _, msg := isVRConditionMet(volRep, status, volrep.ConditionResyncing, metav1.ConditionFalse) if !conditionMet { defaultMsg := "VolumeReplication resource for pvc not syncing as Secondary" - v.updatePVCDataReadyConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg, + v.updatePVCDataReadyConditionHelper(pvc, VRGConditionReasonError, msg, defaultMsg) - v.updatePVCDataProtectedConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg, + v.updatePVCDataProtectedConditionHelper(pvc, VRGConditionReasonError, msg, defaultMsg) v.log.Info(fmt.Sprintf("%s (VolRep: %s/%s)", defaultMsg, volRep.GetName(), volRep.GetNamespace())) @@ -1763,10 +1755,10 @@ func (v *VRGInstance) checkResyncCompletionAsSecondary(pvc *corev1.PersistentVol conditionMet, _, msg = isVRConditionMet(volRep, status, volrep.ConditionDegraded, metav1.ConditionFalse) if !conditionMet { defaultMsg := "VolumeReplication resource for pvc is not syncing and is degraded as Secondary" - v.updatePVCDataReadyConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg, + v.updatePVCDataReadyConditionHelper(pvc, VRGConditionReasonError, msg, defaultMsg) - v.updatePVCDataProtectedConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg, + v.updatePVCDataProtectedConditionHelper(pvc, VRGConditionReasonError, msg, defaultMsg) v.log.Info(fmt.Sprintf("%s (VolRep: %s/%s)", defaultMsg, volRep.GetName(), volRep.GetNamespace())) @@ -1775,8 +1767,8 @@ func (v *VRGInstance) checkResyncCompletionAsSecondary(pvc *corev1.PersistentVol } msg = "VolumeReplication resource for the pvc as Secondary is in sync with Primary" - v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonReplicated, msg) - 
v.updatePVCDataProtectedCondition(pvc.Namespace, pvc.Name, VRGConditionReasonDataProtected, msg) + v.updatePVCDataReadyCondition(pvc, VRGConditionReasonReplicated, msg) + v.updatePVCDataProtectedCondition(pvc, VRGConditionReasonDataProtected, msg) v.log.Info(fmt.Sprintf("data sync completed as both degraded and resyncing are false for"+ " secondary VolRep (%s/%s)", volRep.GetName(), volRep.GetNamespace())) @@ -1839,27 +1831,22 @@ func isVRConditionMet(volRep client.Object, status *volrep.VolumeReplicationStat // function sends reason as VRGConditionReasonError and the linter // complains about this function always receiving the same reason. func (v *VRGInstance) updatePVCDataReadyConditionHelper( - namespace string, - name string, + pvc *corev1.PersistentVolumeClaim, reason string, //nolint: unparam message, defaultMessage string, ) { if message != "" { - v.updatePVCDataReadyCondition(namespace, name, reason, message) + v.updatePVCDataReadyCondition(pvc, reason, message) return } - v.updatePVCDataReadyCondition(namespace, name, reason, defaultMessage) + v.updatePVCDataReadyCondition(pvc, reason, defaultMessage) } -func (v *VRGInstance) updatePVCDataReadyCondition(pvcNamespace, pvcName, reason, message string) { - protectedPVC := v.findProtectedPVC(pvcNamespace, pvcName) - if protectedPVC == nil { - protectedPVC = v.addProtectedPVC(pvcNamespace, pvcName) - } - +func (v *VRGInstance) updatePVCDataReadyCondition(pvc *corev1.PersistentVolumeClaim, reason, message string) { + protectedPVC := v.getProtectedPVC(pvc) setPVCDataReadyCondition(protectedPVC, reason, message, v.instance.Generation) } @@ -1867,27 +1854,22 @@ func (v *VRGInstance) updatePVCDataReadyCondition(pvcNamespace, pvcName, reason, // function sends reason as VRGConditionReasonError and the linter // complains about this function always receiving the same reason. 
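The refactored updaters above replace the find-or-add pair with a single v.getProtectedPVC(pvc) call whose definition lies outside this diff. A minimal sketch of such a helper, assuming the consistency-group label routing that the tests below exercise; the cgName derivation and the addProtectedCG helper are illustrative assumptions, not code from this patch:

func (v *VRGInstance) getProtectedPVC(pvc *corev1.PersistentVolumeClaim) *ramendrv1alpha1.ProtectedPVC {
	// PVCs carrying the consistency-group label are tracked per CG entry;
	// all other PVCs keep the previous per-PVC find-or-add behavior.
	if cg, ok := pvc.GetLabels()[ConsistencyGroupLabel]; ok {
		cgName := cg + v.instance.Name // assumed naming, mirroring vrgCgStatusGet in the tests below

		if protectedCG := FindProtectedCG(v.instance, cgName, pvc.Namespace); protectedCG != nil {
			return protectedCG
		}

		return v.addProtectedCG(cgName, pvc.Namespace) // hypothetical helper, not shown in this diff
	}

	protectedPVC := v.findProtectedPVC(pvc.Namespace, pvc.Name)
	if protectedPVC == nil {
		protectedPVC = v.addProtectedPVC(pvc.Namespace, pvc.Name)
	}

	return protectedPVC
}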
@@ -1867,27 +1854,22 @@ func (v *VRGInstance) updatePVCDataReadyCondition(pvcNamespace, pvcName, reason,
 // function sends reason as VRGConditionReasonError and the linter
 // complains about this function always receiving the same reason.
 func (v *VRGInstance) updatePVCDataProtectedConditionHelper(
-	namespace string,
-	name string,
+	pvc *corev1.PersistentVolumeClaim,
 	reason string, //nolint: unparam
 	message, defaultMessage string,
 ) {
 	if message != "" {
-		v.updatePVCDataProtectedCondition(namespace, name, reason, message)
+		v.updatePVCDataProtectedCondition(pvc, reason, message)
 
 		return
 	}
 
-	v.updatePVCDataProtectedCondition(namespace, name, reason, defaultMessage)
+	v.updatePVCDataProtectedCondition(pvc, reason, defaultMessage)
 }
 
-func (v *VRGInstance) updatePVCDataProtectedCondition(pvcNamespace, pvcName, reason, message string) {
-	protectedPVC := v.findProtectedPVC(pvcNamespace, pvcName)
-	if protectedPVC == nil {
-		protectedPVC = v.addProtectedPVC(pvcNamespace, pvcName)
-	}
-
+func (v *VRGInstance) updatePVCDataProtectedCondition(pvc *corev1.PersistentVolumeClaim, reason, message string) {
+	protectedPVC := v.getProtectedPVC(pvc)
 	setPVCDataProtectedCondition(protectedPVC, reason, message, v.instance.Generation)
 }
@@ -1947,12 +1929,9 @@ func setPVCDataProtectedCondition(protectedPVC *ramendrv1alpha1.ProtectedPVC, re
 	}
 }
 
-func (v *VRGInstance) updatePVCClusterDataProtectedCondition(pvcNamespace, pvcName, reason, message string) {
-	protectedPVC := v.findProtectedPVC(pvcNamespace, pvcName)
-	if protectedPVC == nil {
-		protectedPVC = v.addProtectedPVC(pvcNamespace, pvcName)
-	}
-
+func (v *VRGInstance) updatePVCClusterDataProtectedCondition(pvc *corev1.PersistentVolumeClaim, reason, message string,
+) {
+	protectedPVC := v.getProtectedPVC(pvc)
 	setPVCClusterDataProtectedCondition(protectedPVC, reason, message, v.instance.Generation)
 }
@@ -1973,11 +1952,10 @@ func setPVCClusterDataProtectedCondition(protectedPVC *ramendrv1alpha1.Protected
 	}
 }
 
-func (v *VRGInstance) updatePVCLastSyncCounters(pvcNamespace, pvcName string, status *volrep.VolumeReplicationStatus) {
-	protectedPVC := v.findProtectedPVC(pvcNamespace, pvcName)
-	if protectedPVC == nil {
-		return
-	}
+func (v *VRGInstance) updatePVCLastSyncCounters(pvc *corev1.PersistentVolumeClaim,
+	status *volrep.VolumeReplicationStatus,
+) {
+	protectedPVC := v.getProtectedPVC(pvc)
 
 	if status == nil {
 		protectedPVC.LastSyncTime = nil
@@ -2692,10 +2670,12 @@ func (v *VRGInstance) aggregateVolRepDataReadyCondition() *metav1.Condition {
 		return v.vrgReadyStatus(VRGConditionReasonUnused)
 	}
 
-	vrgReady := len(v.instance.Status.ProtectedPVCs) != 0
+	protectedPVCs := v.getProtectedPVCs()
+
+	vrgReady := len(protectedPVCs) != 0
 	vrgProgressing := false
 
-	for _, protectedPVC := range v.instance.Status.ProtectedPVCs {
+	for _, protectedPVC := range protectedPVCs {
 		if protectedPVC.ProtectedByVolSync {
 			continue
 		}
@@ -2773,7 +2753,8 @@ func (v *VRGInstance) aggregateVolRepDataProtectedCondition() *metav1.Condition
 	vrgProtected := true
 	vrgReplicating := false
 
-	for _, protectedPVC := range v.instance.Status.ProtectedPVCs {
+	protectedPVCs := v.getProtectedPVCs()
+	for _, protectedPVC := range protectedPVCs {
 		if protectedPVC.ProtectedByVolSync {
 			continue
 		}
@@ -2855,7 +2836,8 @@ func (v *VRGInstance) aggregateVolRepClusterDataProtectedCondition() *metav1.Con
 	atleastOneProtecting := false
 
-	for _, protectedPVC := range v.instance.Status.ProtectedPVCs {
+	protectedPVCs := v.getProtectedPVCs()
+	for _, protectedPVC := range protectedPVCs {
 		if protectedPVC.ProtectedByVolSync {
 			continue
 		}
diff --git a/internal/controller/vrg_volrep_test.go b/internal/controller/vrg_volrep_test.go
index 44a91619c..8d7868c55 100644
--- a/internal/controller/vrg_volrep_test.go
+++ b/internal/controller/vrg_volrep_test.go
@@ -886,7 +886,7 @@ var _ = Describe("VolumeReplicationGroupVolRepController", func() {
 			vrgVGRDeleteEnsureTestCase.protectDeletionOfVolGroupReps()
 
 			By("Starting the VRG deletion process")
-			vrgVGRDeleteEnsureTestCase.cleanupPVCs(pvcProtectedVerify, vrAndPvcDeletionTimestampsRecentVerify)
+			vrgVGRDeleteEnsureTestCase.cleanupPVCs(cgProtectedVerify, vrAndPvcDeletionTimestampsRecentVerify)
 			vrg := vrgVGRDeleteEnsureTestCase.getVRG()
 			Expect(k8sClient.Delete(context.TODO(), vrg)).To(Succeed())
@@ -951,7 +951,7 @@ var _ = Describe("VolumeReplicationGroupVolRepController", func() {
 			vrgCreateVGRTestCase.verifyVRGStatusExpectation(true, vrgController.VRGConditionReasonReady)
 		})
 		It("cleans up after testing", func() {
-			vrgCreateVGRTestCase.cleanupProtected()
+			vrgCreateVGRTestCase.cleanupProtectedCGs()
 		})
 	})
@@ -1000,7 +1000,7 @@ var _ = Describe("VolumeReplicationGroupVolRepController", func() {
 			vrgPVCnotBoundVGRTestCase.verifyVRGStatusExpectation(true, vrgController.VRGConditionReasonReady)
 		})
 		It("cleans up after testing", func() {
-			vrgPVCnotBoundVGRTestCase.cleanupProtected()
+			vrgPVCnotBoundVGRTestCase.cleanupProtectedCGs()
 		})
 	})
@@ -2491,6 +2491,10 @@ func (v *vrgTest) cleanupProtected() {
 	v.cleanup(pvcProtectedVerify)
 }
 
+func (v *vrgTest) cleanupProtectedCGs() {
+	v.cleanup(cgProtectedVerify)
+}
+
 func (v *vrgTest) cleanup(
 	pvcPreDeleteVerify pvcPreDeleteVerify,
 ) {
@@ -2606,6 +2610,40 @@ func vrgPvcStatusGet(
 	return *vrgPvcStatus
 }
 
+func cgProtectedVerify(
+	vrg ramendrv1alpha1.VolumeReplicationGroup, pvcNamespacedName types.NamespacedName, pvName string,
+) {
+	vrgPvcStatus := vrgCgStatusGet(vrg, pvcNamespacedName)
+
+	By("setting VRG's CG ClusterDataProtected status to true")
+	pvcClusterDataProtectedStatusVerify(vrgPvcStatus, Equal(metav1.ConditionTrue))
+
+	pvAndPvcObjectReplicasPresentVerify(client.ObjectKeyFromObject(&vrg), pvcNamespacedName, pvName)
+}
+
+func vrgCgStatusGet(
+	vrg ramendrv1alpha1.VolumeReplicationGroup, pvcNamespacedName types.NamespacedName,
+) ramendrv1alpha1.ProtectedPVC {
+	pvcLookupKey := types.NamespacedName{Name: pvcNamespacedName.Name, Namespace: pvcNamespacedName.Namespace}
+	PVC := &corev1.PersistentVolumeClaim{}
+
+	Eventually(func() bool {
+		err := k8sClient.Get(context.TODO(), pvcLookupKey, PVC)
+
+		return err == nil
+	}, timeout, interval).Should(BeTrue(),
+		"while waiting for PVC %+v", PVC)
+
+	cg, ok := PVC.GetLabels()[vrgController.ConsistencyGroupLabel]
+	Expect(ok).To(BeTrue())
+
+	cgName := cg + vrg.Name
+
+	vrgPvcStatus := vrgController.FindProtectedCG(&vrg, cgName, pvcNamespacedName.Namespace)
+	Expect(vrgPvcStatus).ToNot(BeNil())
+
+	return *vrgPvcStatus
+}
+
 func pvcClusterDataProtectedStatusVerify(
 	vrgPvcStatus ramendrv1alpha1.ProtectedPVC,
 	matcher gomegatypes.GomegaMatcher,
@@ -3219,7 +3257,7 @@ func (v *vrgTest) waitForVGRProtectedPVCs(vrNamespacedName types.NamespacedName,
 	protected := false
 
 	for idx := range pvcList.Items {
 		pvc := pvcList.Items[idx]
-		protectedPVC := vrgController.FindProtectedPVC(vrg, pvc.Namespace, pvc.Name)
+		protectedPVC := vrgController.FindProtectedCG(vrg, vrNamespacedName.Name, pvc.Namespace)
 		if protectedPVC == nil {
 			continue
 		}
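The aggregate condition helpers above now range over v.getProtectedPVCs() instead of v.instance.Status.ProtectedPVCs, and that helper is also not part of this excerpt. A plausible sketch, assuming it simply flattens the per-PVC and per-CG status entries (each ProtectedCG embeds a ProtectedPVC that carries the group's conditions) so the aggregation loops can stay unchanged:

func (v *VRGInstance) getProtectedPVCs() []ramendrv1alpha1.ProtectedPVC {
	status := &v.instance.Status
	protectedPVCs := make([]ramendrv1alpha1.ProtectedPVC, 0, len(status.ProtectedPVCs)+len(status.ProtectedCGs))
	protectedPVCs = append(protectedPVCs, status.ProtectedPVCs...)

	// Each consistency group contributes one entry: the embedded ProtectedPVC
	// holds the group-level conditions that the aggregation inspects.
	for idx := range status.ProtectedCGs {
		protectedPVCs = append(protectedPVCs, status.ProtectedCGs[idx].ProtectedPVC)
	}

	return protectedPVCs
}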
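The tests also call the exported vrgController.FindProtectedCG, which is defined elsewhere in the controller package. A hedged sketch, under the assumption that it mirrors FindProtectedPVC and matches on the CG name and namespace recorded in the group's embedded ProtectedPVC:

func FindProtectedCG(vrg *ramendrv1alpha1.VolumeReplicationGroup, cgName, namespace string,
) *ramendrv1alpha1.ProtectedPVC {
	for idx := range vrg.Status.ProtectedCGs {
		protectedCG := &vrg.Status.ProtectedCGs[idx]
		// Assumption: the embedded ProtectedPVC's Name field carries the CG name.
		if protectedCG.Name == cgName && protectedCG.Namespace == namespace {
			return &protectedCG.ProtectedPVC
		}
	}

	return nil
}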