Skip to content

Commit

Permalink
Merge pull request #530 from parth-gr/cluster_name
Browse files Browse the repository at this point in the history
 Bug 2268174: external: change cluster_name to k8s_cluster_name
  • Loading branch information
travisn authored Sep 9, 2024
2 parents d4ed445 + 25db3cb commit 4d8881f
Show file tree
Hide file tree
Showing 4 changed files with 40 additions and 40 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/canary-integration-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -159,12 +159,12 @@ jobs:
- name: test external script with restricted_auth_permission flag and without having cephfs_filesystem flag
run: |
toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool --cluster-name rookstorage --restricted-auth-permission true
kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --restricted-auth-permission true
- name: test external script with restricted_auth_permission flag
run: |
toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --cephfs-filesystem-name myfs --rbd-data-pool-name replicapool --cluster-name rookstorage --restricted-auth-permission true
kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --cephfs-filesystem-name myfs --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --restricted-auth-permission true
- name: test the upgrade flag
run: |
Expand All @@ -182,8 +182,8 @@ jobs:
# print existing client auth
kubectl -n rook-ceph exec $toolbox -- ceph auth get client.csi-rbd-node-rookstorage-replicapool
# restricted auth user need to provide --rbd-data-pool-name,
# --cluster-name and --run-as-user flag while upgrading
kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool
# --k8s-cluster-name and --run-as-user flag while upgrading
kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool
# print upgraded client auth
kubectl -n rook-ceph exec $toolbox -- ceph auth get client.csi-rbd-node-rookstorage-replicapool
Expand Down
10 changes: 5 additions & 5 deletions Documentation/CRDs/Cluster/external-cluster.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ python3 create-external-cluster-resources.py --rbd-data-pool-name <pool_name> --
* `--skip-monitoring-endpoint`: (optional) Skip prometheus exporter endpoints, even if they are available. Useful if the prometheus module is not enabled
* `--ceph-conf`: (optional) Provide a Ceph conf file
* `--keyring`: (optional) Path to Ceph keyring file, to be used with `--ceph-conf`
* `--cluster-name`: (optional) Ceph cluster name
* `--k8s-cluster-name`: (optional) Kubernetes cluster name
* `--output`: (optional) Output will be stored into the provided file
* `--dry-run`: (optional) Prints the executed commands without running them
* `--run-as-user`: (optional) Provides a user name to check the cluster's health status, must be prefixed by `client`.
Expand All @@ -58,7 +58,7 @@ python3 create-external-cluster-resources.py --rbd-data-pool-name <pool_name> --
* `--rgw-zone-name`: (optional) Provides the name of the rgw-zone
* `--rgw-zonegroup-name`: (optional) Provides the name of the rgw-zone-group
* `--upgrade`: (optional) Upgrades the cephCSIKeyrings(For example: client.csi-cephfs-provisioner) and client.healthchecker ceph users with new permissions needed for the new cluster version and older permission will still be applied.
* `--restricted-auth-permission`: (optional) Restrict cephCSIKeyrings auth permissions to specific pools, and cluster. Mandatory flags that need to be set are `--rbd-data-pool-name`, and `--cluster-name`. `--cephfs-filesystem-name` flag can also be passed in case of CephFS user restriction, so it can restrict users to particular CephFS filesystem.
* `--restricted-auth-permission`: (optional) Restrict cephCSIKeyrings auth permissions to specific pools, and cluster. Mandatory flags that need to be set are `--rbd-data-pool-name`, and `--k8s-cluster-name`. `--cephfs-filesystem-name` flag can also be passed in case of CephFS user restriction, so it can restrict users to particular CephFS filesystem.
* `--v2-port-enable`: (optional) Enables the v2 mon port (3300) for mons.

### Multi-tenancy
Expand All @@ -72,7 +72,7 @@ So you would be running different isolated consumer clusters on top of single `S
So apply these secrets only to new `Consumer cluster` deployment while using the same `Source cluster`.

```console
python3 create-external-cluster-resources.py --cephfs-filesystem-name <filesystem-name> --rbd-data-pool-name <pool_name> --cluster-name <cluster-name> --restricted-auth-permission true --format <bash> --rgw-endpoint <rgw_endpoint> --namespace <rook-ceph-external>
python3 create-external-cluster-resources.py --cephfs-filesystem-name <filesystem-name> --rbd-data-pool-name <pool_name> --k8s-cluster-name <k8s-cluster-name> --restricted-auth-permission true --format <bash> --rgw-endpoint <rgw_endpoint> --namespace <rook-ceph-external>
```

### RGW Multisite
Expand All @@ -93,10 +93,10 @@ python3 create-external-cluster-resources.py --upgrade
```

2) If the consumer cluster has restricted caps:
Restricted users created using `--restricted-auth-permission` flag need to pass mandatory flags: '`--rbd-data-pool-name`(if it is a rbd user), `--cluster-name` and `--run-as-user`' flags while upgrading, in case of cephfs users if you have passed `--cephfs-filesystem-name` flag while creating csi-users then while upgrading it will be mandatory too. In this example the user would be `client.csi-rbd-node-rookstorage-replicapool` (following the pattern `csi-user-clusterName-poolName`)
Restricted users created using the `--restricted-auth-permission` flag need to pass the mandatory flags `--rbd-data-pool-name` (if it is an rbd user), `--k8s-cluster-name`, and `--run-as-user` while upgrading. In the case of CephFS users, if you passed the `--cephfs-filesystem-name` flag while creating the csi-users, then it is mandatory while upgrading as well. In this example the user would be `client.csi-rbd-node-rookstorage-replicapool` (following the pattern `csi-user-clusterName-poolName`)

```console
python3 create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool
python3 create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool
```

!!! note
Expand Down
2 changes: 1 addition & 1 deletion deploy/examples/create-external-cluster-resources-tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def test_method_create_cephCSIKeyring_cephFSProvisioner(self):
)
print(f"cephCSIKeyring without restricting it to a metadata pool. {csiKeyring}")
self.rjObj._arg_parser.restricted_auth_permission = True
self.rjObj._arg_parser.cluster_name = "openshift-storage"
self.rjObj._arg_parser.k8s_cluster_name = "openshift-storage"
csiKeyring = self.rjObj.create_cephCSIKeyring_user(
"client.csi-cephfs-provisioner"
)
Expand Down
60 changes: 30 additions & 30 deletions deploy/examples/create-external-cluster-resources.py
Original file line number Diff line number Diff line change
Expand Up @@ -324,7 +324,7 @@ def gen_arg_parser(cls, args_to_parse=None):
help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'",
)
common_group.add_argument(
"--cluster-name", default="", help="Ceph cluster name"
"--k8s-cluster-name", default="", help="Kubernetes cluster name"
)
common_group.add_argument(
"--namespace",
Expand All @@ -338,9 +338,9 @@ def gen_arg_parser(cls, args_to_parse=None):
"--restricted-auth-permission",
default=False,
help="Restrict cephCSIKeyrings auth permissions to specific pools, cluster."
+ "Mandatory flags that need to be set are --rbd-data-pool-name, and --cluster-name."
+ "Mandatory flags that need to be set are --rbd-data-pool-name, and --k8s-cluster-name."
+ "--cephfs-filesystem-name flag can also be passed in case of cephfs user restriction, so it can restrict user to particular cephfs filesystem"
+ "sample run: `python3 /etc/ceph/create-external-cluster-resources.py --cephfs-filesystem-name myfs --rbd-data-pool-name replicapool --cluster-name rookstorage --restricted-auth-permission true`"
+ "sample run: `python3 /etc/ceph/create-external-cluster-resources.py --cephfs-filesystem-name myfs --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --restricted-auth-permission true`"
+ "Note: Restricting the csi-users per pool, and per cluster will require creating new csi-users and new secrets for that csi-users."
+ "So apply these secrets only to new `Consumer cluster` deployment while using the same `Source cluster`.",
)
Expand Down Expand Up @@ -479,9 +479,9 @@ def gen_arg_parser(cls, args_to_parse=None):
help="Upgrades the cephCSIKeyrings(For example: client.csi-cephfs-provisioner) and client.healthchecker ceph users with new permissions needed for the new cluster version and older permission will still be applied."
+ "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade`, this will upgrade all the default csi users(non-restricted)"
+ "For restricted users(For example: client.csi-cephfs-provisioner-openshift-storage-myfs), users created using --restricted-auth-permission flag need to pass mandatory flags"
+ "mandatory flags: '--rbd-data-pool-name, --cluster-name and --run-as-user' flags while upgrading"
+ "mandatory flags: '--rbd-data-pool-name, --k8s-cluster-name and --run-as-user' flags while upgrading"
+ "in case of cephfs users if you have passed --cephfs-filesystem-name flag while creating user then while upgrading it will be mandatory too"
+ "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool`"
+ "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool`"
+ "PS: An existing non-restricted user cannot be converted to a restricted user by upgrading."
+ "Upgrade flag should only be used to append new permissions to users, it shouldn't be used for changing user already applied permission, for example you shouldn't change in which pool user has access",
)
Expand Down Expand Up @@ -853,16 +853,16 @@ def get_cephfs_provisioner_caps_and_entity(self):
"osd": "allow rw tag cephfs metadata=*",
}
if self._arg_parser.restricted_auth_permission:
cluster_name = self._arg_parser.cluster_name
if cluster_name == "":
k8s_cluster_name = self._arg_parser.k8s_cluster_name
if k8s_cluster_name == "":
raise ExecutionFailureException(
"cluster_name not found, please set the '--cluster-name' flag"
"k8s_cluster_name not found, please set the '--k8s-cluster-name' flag"
)
cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
if cephfs_filesystem == "":
entity = f"{entity}-{cluster_name}"
entity = f"{entity}-{k8s_cluster_name}"
else:
entity = f"{entity}-{cluster_name}-{cephfs_filesystem}"
entity = f"{entity}-{k8s_cluster_name}-{cephfs_filesystem}"
caps["osd"] = f"allow rw tag cephfs metadata={cephfs_filesystem}"

return caps, entity
Expand All @@ -876,21 +876,21 @@ def get_cephfs_node_caps_and_entity(self):
"mds": "allow rw",
}
if self._arg_parser.restricted_auth_permission:
cluster_name = self._arg_parser.cluster_name
if cluster_name == "":
k8s_cluster_name = self._arg_parser.k8s_cluster_name
if k8s_cluster_name == "":
raise ExecutionFailureException(
"cluster_name not found, please set the '--cluster-name' flag"
"k8s_cluster_name not found, please set the '--k8s-cluster-name' flag"
)
cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
if cephfs_filesystem == "":
entity = f"{entity}-{cluster_name}"
entity = f"{entity}-{k8s_cluster_name}"
else:
entity = f"{entity}-{cluster_name}-{cephfs_filesystem}"
entity = f"{entity}-{k8s_cluster_name}-{cephfs_filesystem}"
caps["osd"] = f"allow rw tag cephfs *={cephfs_filesystem}"

return caps, entity

def get_entity(self, entity, rbd_pool_name, alias_rbd_pool_name, cluster_name):
def get_entity(self, entity, rbd_pool_name, alias_rbd_pool_name, k8s_cluster_name):
if (
rbd_pool_name.count(".") != 0
or rbd_pool_name.count("_") != 0
Expand All @@ -908,9 +908,9 @@ def get_entity(self, entity, rbd_pool_name, alias_rbd_pool_name, cluster_name):
raise ExecutionFailureException(
"'--alias-rbd-data-pool-name' flag value should not contain '.' or '_'"
)
entity = f"{entity}-{cluster_name}-{alias_rbd_pool_name}"
entity = f"{entity}-{k8s_cluster_name}-{alias_rbd_pool_name}"
else:
entity = f"{entity}-{cluster_name}-{rbd_pool_name}"
entity = f"{entity}-{k8s_cluster_name}-{rbd_pool_name}"

return entity

Expand All @@ -924,17 +924,17 @@ def get_rbd_provisioner_caps_and_entity(self):
if self._arg_parser.restricted_auth_permission:
rbd_pool_name = self._arg_parser.rbd_data_pool_name
alias_rbd_pool_name = self._arg_parser.alias_rbd_data_pool_name
cluster_name = self._arg_parser.cluster_name
k8s_cluster_name = self._arg_parser.k8s_cluster_name
if rbd_pool_name == "":
raise ExecutionFailureException(
"mandatory flag not found, please set the '--rbd-data-pool-name' flag"
)
if cluster_name == "":
if k8s_cluster_name == "":
raise ExecutionFailureException(
"mandatory flag not found, please set the '--cluster-name' flag"
"mandatory flag not found, please set the '--k8s-cluster-name' flag"
)
entity = self.get_entity(
entity, rbd_pool_name, alias_rbd_pool_name, cluster_name
entity, rbd_pool_name, alias_rbd_pool_name, k8s_cluster_name
)
caps["osd"] = f"profile rbd pool={rbd_pool_name}"

Expand All @@ -949,17 +949,17 @@ def get_rbd_node_caps_and_entity(self):
if self._arg_parser.restricted_auth_permission:
rbd_pool_name = self._arg_parser.rbd_data_pool_name
alias_rbd_pool_name = self._arg_parser.alias_rbd_data_pool_name
cluster_name = self._arg_parser.cluster_name
k8s_cluster_name = self._arg_parser.k8s_cluster_name
if rbd_pool_name == "":
raise ExecutionFailureException(
"mandatory flag not found, please set the '--rbd-data-pool-name' flag"
)
if cluster_name == "":
if k8s_cluster_name == "":
raise ExecutionFailureException(
"mandatory flag not found, please set the '--cluster-name' flag"
"mandatory flag not found, please set the '--k8s-cluster-name' flag"
)
entity = self.get_entity(
entity, rbd_pool_name, alias_rbd_pool_name, cluster_name
entity, rbd_pool_name, alias_rbd_pool_name, k8s_cluster_name
)
caps["osd"] = f"profile rbd pool={rbd_pool_name}"

Expand Down Expand Up @@ -1463,15 +1463,15 @@ def validate_rgw_multisite(self, rgw_multisite_config_name, rgw_multisite_config
def _gen_output_map(self):
if self.out_map:
return
self._arg_parser.cluster_name = (
self._arg_parser.cluster_name.lower()
self._arg_parser.k8s_cluster_name = (
self._arg_parser.k8s_cluster_name.lower()
) # always convert cluster name to lowercase characters
self.validate_rbd_pool()
self.validate_rados_namespace()
self._excluded_keys.add("CLUSTER_NAME")
self._excluded_keys.add("K8S_CLUSTER_NAME")
self.get_cephfs_data_pool_details()
self.out_map["NAMESPACE"] = self._arg_parser.namespace
self.out_map["CLUSTER_NAME"] = self._arg_parser.cluster_name
self.out_map["K8S_CLUSTER_NAME"] = self._arg_parser.k8s_cluster_name
self.out_map["ROOK_EXTERNAL_FSID"] = self.get_fsid()
self.out_map["ROOK_EXTERNAL_USERNAME"] = self.run_as_user
self.out_map["ROOK_EXTERNAL_CEPH_MON_DATA"] = self.get_ceph_external_mon_data()
Expand Down

0 comments on commit 4d8881f

Please sign in to comment.