
Commit 5162a9e

Merge commit '40832826877151a8745ab61375ad381ea7870aad' into sync_us-master

Signed-off-by: Ceph Jenkins <[email protected]>
Ceph Jenkins committed Oct 5, 2024
2 parents 91dbb6a + 4083282 commit 5162a9e
Showing 70 changed files with 538 additions and 555 deletions.
72 changes: 67 additions & 5 deletions .github/workflows/canary-integration-test.yml
@@ -49,7 +49,9 @@ jobs:
tests/scripts/github-action-helper.sh create_partitions_for_osds
- name: deploy cluster
- run: tests/scripts/github-action-helper.sh deploy_cluster
+ run: |
+   tests/scripts/github-action-helper.sh deploy_cluster
+   tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
- name: setup csi-addons
run: tests/scripts/csiaddons.sh setup_csiaddons
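This single-command-to-block-scalar conversion is the recurring change in this workflow: each `deploy cluster` step now chains `deploy_all_additional_resources_on_cluster` after the existing deploy command. For reference, the resulting step reads roughly as follows in the workflow file (indentation and the surrounding `steps:` context are reconstructed, not part of the diff):

```yaml
steps:
  - name: deploy cluster
    run: |
      tests/scripts/github-action-helper.sh deploy_cluster
      tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
```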
@@ -364,6 +366,7 @@ jobs:
run: |
export ALLOW_LOOP_DEVICES=true
tests/scripts/github-action-helper.sh deploy_cluster loop
+ tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
tests/scripts/github-action-helper.sh create_operator_toolbox
- name: wait for prepare pod
@@ -433,7 +436,9 @@ jobs:
tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --wipe-only
- name: deploy cluster
- run: tests/scripts/github-action-helper.sh deploy_cluster two_osds_in_device
+ run: |
+   tests/scripts/github-action-helper.sh deploy_cluster two_osds_in_device
+   tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
- name: wait for prepare pod
run: tests/scripts/github-action-helper.sh wait_for_prepare_pod 2
@@ -482,6 +487,7 @@ jobs:
- name: deploy cluster
run: |
tests/scripts/github-action-helper.sh deploy_cluster osd_with_metadata_partition_device
+ tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
- name: wait for prepare pod
run: tests/scripts/github-action-helper.sh wait_for_prepare_pod 1
@@ -537,7 +543,9 @@ jobs:
tests/scripts/github-action-helper.sh create_LV_on_disk $(sudo losetup --find --show test-rook.img)
- name: deploy cluster
- run: tests/scripts/github-action-helper.sh deploy_cluster osd_with_metadata_device
+ run: |
+   tests/scripts/github-action-helper.sh deploy_cluster osd_with_metadata_device
+   tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
- name: wait for prepare pod
run: tests/scripts/github-action-helper.sh wait_for_prepare_pod 1
@@ -587,7 +595,9 @@ jobs:
tests/scripts/create-bluestore-partitions.sh --disk "$BLOCK" --wipe-only
- name: deploy cluster
- run: tests/scripts/github-action-helper.sh deploy_cluster encryption
+ run: |
+   tests/scripts/github-action-helper.sh deploy_cluster encryption
+   tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster encryption
- name: wait for prepare pod
run: tests/scripts/github-action-helper.sh wait_for_prepare_pod 1
@@ -642,7 +652,9 @@ jobs:
tests/scripts/github-action-helper.sh create_LV_on_disk $BLOCK
- name: deploy cluster
- run: tests/scripts/github-action-helper.sh deploy_cluster lvm
+ run: |
+   tests/scripts/github-action-helper.sh deploy_cluster lvm
+   tests/scripts/github-action-helper.sh deploy_all_additional_resources_on_cluster
- name: wait for prepare pod
run: tests/scripts/github-action-helper.sh wait_for_prepare_pod 1
@@ -1698,3 +1710,53 @@ jobs:
uses: ./.github/workflows/collect-logs
with:
name: ${{ github.job }}-${{ matrix.ceph-image }}

+ object-with-cephblockpools:
+   runs-on: ubuntu-22.04
+   if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
+   strategy:
+     matrix:
+       ceph-image: ${{ fromJson(inputs.ceph_images) }}
+   steps:
+     - name: checkout
+       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+       with:
+         fetch-depth: 0
+
+     - name: consider debugging
+       uses: ./.github/workflows/tmate_debug
+       with:
+         use-tmate: ${{ secrets.USE_TMATE }}
+
+     - name: setup cluster resources
+       uses: ./.github/workflows/canary-test-config
+
+     - name: set Ceph version in CephCluster manifest
+       run: tests/scripts/github-action-helper.sh replace_ceph_image "deploy/examples/cluster-test.yaml" "${{ github.event.inputs.ceph-image }}"
+
+     - name: validate-yaml
+       run: tests/scripts/github-action-helper.sh validate_yaml
+
+     - name: use local disk and create partitions for osds
+       run: |
+         tests/scripts/github-action-helper.sh use_local_disk
+         tests/scripts/github-action-helper.sh create_partitions_for_osds
+     - name: deploy cluster
+       run: tests/scripts/github-action-helper.sh deploy_cluster
+
+     - name: create CephBlockPool(s) and CephObjectStore
+       shell: bash --noprofile --norc -eo pipefail -x {0}
+       run: kubectl create -f deploy/examples/object-with-cephblockpools-test.yaml
+
+     - name: wait for CephObjectStore to be ready
+       run: tests/scripts/validate_cluster.sh rgw object-with-cephblockpools
+
+     - name: check for pools created by RGW that are unexpected
+       run: tests/scripts/github-action-helper.sh test_object_with_cephblockpools_extra_pools
+
+     - name: collect common logs
+       if: always()
+       uses: ./.github/workflows/collect-logs
+       with:
+         name: ${{ github.job }}-${{ matrix.ceph-image }}
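The manifest applied above, `deploy/examples/object-with-cephblockpools-test.yaml`, is not shown in this diff. As a rough sketch of the pattern the job name implies, a CephObjectStore can be pointed at pre-created CephBlockPools via Rook's shared-pools support; all field values below are illustrative assumptions, not the repository file:

```yaml
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: object-with-cephblockpools
  namespace: rook-ceph
spec:
  # Reuse pools created as CephBlockPools instead of letting RGW create its own
  sharedPools:
    metadataPoolName: rgw-meta-pool # hypothetical CephBlockPool name
    dataPoolName: rgw-data-pool     # hypothetical CephBlockPool name
  gateway:
    port: 80
    instances: 1
```

The follow-up step `test_object_with_cephblockpools_extra_pools` then asserts that RGW created no pools beyond the supplied ones.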
122 changes: 1 addition & 121 deletions .github/workflows/daily-nightly-jobs.yml
@@ -110,46 +110,6 @@ jobs:
if: always()
run: sudo rm -rf /usr/bin/yq

- smoke-suite-quincy-devel:
-   if: github.repository == 'rook/rook'
-   runs-on: ubuntu-22.04
-   steps:
-     - name: checkout
-       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-       with:
-         fetch-depth: 0
-
-     - name: consider debugging
-       uses: ./.github/workflows/tmate_debug
-       with:
-         use-tmate: ${{ secrets.USE_TMATE }}
-
-     - name: setup cluster resources
-       uses: ./.github/workflows/integration-test-config-latest-k8s
-       with:
-         github-token: ${{ secrets.GITHUB_TOKEN }}
-         kubernetes-version: "1.28.4"
-
-     - name: TestCephSmokeSuite
-       run: |
-         export DEVICE_FILTER=$(tests/scripts/github-action-helper.sh find_extra_block_dev)
-         SKIP_CLEANUP_POLICY=false CEPH_SUITE_VERSION="quincy-devel" go test -v -timeout 1800s -run TestCephSmokeSuite github.com/rook/rook/tests/integration
-     - name: collect common logs
-       if: always()
-       run: |
-         export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/"
-         export CLUSTER_NAMESPACE="smoke-ns"
-         export OPERATOR_NAMESPACE="smoke-ns-system"
-         tests/scripts/collect-logs.sh
-     - name: Artifact
-       uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
-       if: failure()
-       with:
-         name: ceph-smoke-suite-quincy-artifact
-         path: /home/runner/work/rook/rook/tests/integration/_output/tests/

smoke-suite-reef-devel:
if: github.repository == 'rook/rook'
runs-on: ubuntu-22.04
@@ -270,46 +230,6 @@ jobs:
name: ceph-smoke-suite-master-artifact
path: /home/runner/work/rook/rook/tests/integration/_output/tests/

- object-suite-quincy-devel:
-   if: github.repository == 'rook/rook'
-   runs-on: ubuntu-22.04
-   steps:
-     - name: checkout
-       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-       with:
-         fetch-depth: 0
-
-     - name: consider debugging
-       uses: ./.github/workflows/tmate_debug
-       with:
-         use-tmate: ${{ secrets.USE_TMATE }}
-
-     - name: setup cluster resources
-       uses: ./.github/workflows/integration-test-config-latest-k8s
-       with:
-         github-token: ${{ secrets.GITHUB_TOKEN }}
-         kubernetes-version: "1.28.4"
-
-     - name: TestCephObjectSuite
-       run: |
-         export DEVICE_FILTER=$(tests/scripts/github-action-helper.sh find_extra_block_dev)
-         SKIP_CLEANUP_POLICY=false CEPH_SUITE_VERSION="quincy-devel" go test -v -timeout 1800s -failfast -run TestCephObjectSuite github.com/rook/rook/tests/integration
-     - name: collect common logs
-       if: always()
-       run: |
-         export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/"
-         export CLUSTER_NAMESPACE="object-ns"
-         export OPERATOR_NAMESPACE="object-ns-system"
-         tests/scripts/collect-logs.sh
-     - name: Artifact
-       uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
-       if: failure()
-       with:
-         name: ceph-object-suite-quincy-artifact
-         path: /home/runner/work/rook/rook/tests/integration/_output/tests/

object-suite-ceph-main:
if: github.repository == 'rook/rook'
runs-on: ubuntu-22.04
@@ -431,49 +351,9 @@ jobs:
name: ceph-upgrade-suite-reef-artifact
path: /home/runner/work/rook/rook/tests/integration/_output/tests/

- upgrade-from-quincy-stable-to-quincy-devel:
-   if: github.repository == 'rook/rook'
-   runs-on: ubuntu-22.04
-   steps:
-     - name: checkout
-       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-       with:
-         fetch-depth: 0
-
-     - name: consider debugging
-       uses: ./.github/workflows/tmate_debug
-       with:
-         use-tmate: ${{ secrets.USE_TMATE }}
-
-     - name: setup cluster resources
-       uses: ./.github/workflows/integration-test-config-latest-k8s
-       with:
-         github-token: ${{ secrets.GITHUB_TOKEN }}
-         kubernetes-version: "1.28.4"
-
-     - name: TestCephUpgradeSuite
-       run: |
-         export DEVICE_FILTER=$(tests/scripts/github-action-helper.sh find_extra_block_dev)
-         go test -v -timeout 1800s -failfast -run TestCephUpgradeSuite/TestUpgradeCephToQuincyDevel github.com/rook/rook/tests/integration
-     - name: collect common logs
-       if: always()
-       run: |
-         export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/"
-         export CLUSTER_NAMESPACE="upgrade"
-         export OPERATOR_NAMESPACE="upgrade-system"
-         tests/scripts/collect-logs.sh
-     - name: Artifact
-       uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
-       if: failure()
-       with:
-         name: ceph-upgrade-suite-quincy-artifact
-         path: /home/runner/work/rook/rook/tests/integration/_output/tests/

canary-tests:
if: github.repository == 'rook/rook'
uses: ./.github/workflows/canary-integration-test.yml
with:
ceph_images: '["quay.io/ceph/ceph:v18", "quay.io/ceph/daemon-base:latest-main-devel", "quay.io/ceph/daemon-base:latest-quincy-devel", "quay.io/ceph/daemon-base:latest-reef-devel", "quay.io/ceph/daemon-base:latest-squid-devel"]'
ceph_images: '["quay.io/ceph/ceph:v18", "quay.io/ceph/daemon-base:latest-main-devel", "quay.io/ceph/daemon-base:latest-reef-devel", "quay.io/ceph/daemon-base:latest-squid-devel"]'
secrets: inherit
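For context, the string passed in `ceph_images` is parsed with `fromJson(inputs.ceph_images)` inside the canary workflow and fanned out as a job matrix (see the `object-with-cephblockpools` job above). A sketch of how the called workflow would declare that input — the actual declaration is outside this diff:

```yaml
on:
  workflow_call:
    inputs:
      ceph_images:
        description: JSON array of Ceph image references to test against
        type: string
        required: true
```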
14 changes: 7 additions & 7 deletions Documentation/CRDs/Cluster/ceph-cluster-crd.md
@@ -29,9 +29,9 @@ Settings can be specified at the global level to apply to the cluster as a whole
* `image`: The image used for running the ceph daemons. For example, `quay.io/ceph/ceph:v18.2.4`. For more details read the [container images section](#ceph-container-images).
For the latest ceph images, see the [Ceph DockerHub](https://hub.docker.com/r/ceph/ceph/tags/).
To ensure a consistent version of the image is running across all nodes in the cluster, it is recommended to use a very specific image version.
- Tags also exist that would give the latest version, but they are only recommended for test environments. For example, the tag `v17` will be updated each time a new Quincy build is released.
- Using the `v17` tag is not recommended in production because it may lead to inconsistent versions of the image running across different nodes in the cluster.
- * `allowUnsupported`: If `true`, allow an unsupported major version of the Ceph release. Currently `quincy` and `reef` are supported. Future versions such as `squid` (v19) would require this to be set to `true`. Should be set to `false` in production.
+ Tags also exist that would give the latest version, but they are only recommended for test environments. For example, the tag `v19` will be updated each time a new Squid build is released.
+ Using the general `v19` tag is not recommended in production because it may lead to inconsistent versions of the image running across different nodes in the cluster.
+ * `allowUnsupported`: If `true`, allow an unsupported major version of the Ceph release. Currently Reef and Squid are supported. Future versions such as Tentacle (v20) would require this to be set to `true`. Should be set to `false` in production.
* `imagePullPolicy`: The image pull policy for the ceph daemon pods. Possible values are `Always`, `IfNotPresent`, and `Never`. The default is `IfNotPresent`.
* `dataDirHostPath`: The path on the host ([hostPath](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath)) where config and data should be stored for each of the services. If the directory does not exist, it will be created. Because this directory persists on the host, it will remain after pods are deleted. Following paths and any of their subpaths **must not be used**: `/etc/ceph`, `/rook` or `/var/log/ceph`.
* **WARNING**: For test scenarios, if you delete a cluster and start a new cluster on the same hosts, the path used by `dataDirHostPath` must be deleted. Otherwise, stale keys and other config will remain from the previous cluster and the new mons will fail to start.
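Taken together, these settings live under the CephCluster spec. A minimal sketch with illustrative values (pin a specific image tag rather than a moving one like `v19`):

```yaml
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  cephVersion:
    image: quay.io/ceph/ceph:v19.2.0 # a specific release, recommended for production
    allowUnsupported: false          # keep false in production
    imagePullPolicy: IfNotPresent    # the default
  dataDirHostPath: /var/lib/rook     # must not be /etc/ceph, /rook, or /var/log/ceph
```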
@@ -120,10 +120,10 @@ These are general purpose Ceph container with all necessary daemons and dependen

| TAG | MEANING |
| -------------------- | --------------------------------------------------------- |
- | vRELNUM | Latest release in this series (e.g., **v17** = Quincy) |
- | vRELNUM.Y | Latest stable release in this stable series (e.g., v17.2) |
- | vRELNUM.Y.Z | A specific release (e.g., v18.2.4) |
- | vRELNUM.Y.Z-YYYYMMDD | A specific build (e.g., v18.2.4-20240724) |
+ | vRELNUM | Latest release in this series (e.g., **v19** = Squid) |
+ | vRELNUM.Y | Latest stable release in this stable series (e.g., v19.2) |
+ | vRELNUM.Y.Z | A specific release (e.g., v19.2.0) |
+ | vRELNUM.Y.Z-YYYYMMDD | A specific build (e.g., v19.2.0-20240927) |

A specific build will contain a specific release of Ceph as well as security fixes from the Operating System.

@@ -17,7 +17,7 @@ In external mode, Rook will provide the configuration for the CSI driver and oth
Create the desired types of storage in the provider Ceph cluster:

* [RBD pools](https://docs.ceph.com/en/latest/rados/operations/pools/#create-a-pool)
- * [CephFS filesystem](https://docs.ceph.com/en/quincy/cephfs/createfs/)
+ * [CephFS filesystem](https://docs.ceph.com/en/latest/cephfs/createfs/)

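As a reference, a minimal sketch of creating these resources on the provider cluster (pool and filesystem names are illustrative):

```console
ceph osd pool create rbd-pool
ceph osd pool application enable rbd-pool rbd
ceph fs volume create myfs
```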
## Connect the external Ceph Provider cluster to the Rook consumer cluster

@@ -105,7 +105,7 @@ python3 create-external-cluster-resources.py --cephfs-filesystem-name <filesyste
### RGW Multisite

Pass the `--rgw-realm-name`, `--rgw-zonegroup-name` and `--rgw-zone-name` flags to create the admin ops user in a master zone, zonegroup and realm.
- See the [Multisite doc](https://docs.ceph.com/en/quincy/radosgw/multisite/#configuring-a-master-zone) for creating a zone, zonegroup and realm.
+ See the [Multisite doc](https://docs.ceph.com/en/latest/radosgw/multisite/#configuring-a-master-zone) for creating a zone, zonegroup and realm.

```console
python3 create-external-cluster-resources.py --rbd-data-pool-name <pool_name> --format bash --rgw-endpoint <rgw_endpoint> --rgw-realm-name <rgw_realm_name> --rgw-zonegroup-name <rgw_zonegroup_name> --rgw-zone-name <rgw_zone_name>
4 changes: 1 addition & 3 deletions Documentation/CRDs/Object-Storage/ceph-object-store-crd.md
@@ -148,7 +148,7 @@ The protocols section is divided into two parts:
In the `s3` section of the `protocols` section the following options can be configured:

* `authKeystone`: Whether S3 should also be authenticated using Keystone (`true`) or not (`false`). If set to `false` the default S3 auth will be used.
- * `enabled`: Whether to enable S3 (`true`) or not (`false`). The default is `true` even if the section is not listed at all! Please note that S3 should not be disabled in a [Ceph Multi Site configuration](https://docs.ceph.com/en/quincy/radosgw/multisite).
+ * `enabled`: Whether to enable S3 (`true`) or not (`false`). The default is `true` even if the section is not listed at all! Please note that S3 should not be disabled in a [Ceph Multi Site configuration](https://docs.ceph.com/en/latest/radosgw/multisite).

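A minimal sketch of these two settings in a CephObjectStore manifest, using only the keys documented above (values are illustrative):

```yaml
spec:
  protocols:
    s3:
      enabled: true        # the default, even when this section is omitted
      authKeystone: false  # use the default S3 auth rather than Keystone
```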
#### protocols/swift settings

@@ -332,9 +332,7 @@ vault kv put rook/<mybucketkey> key=$(openssl rand -base64 32) # kv engine
vault write -f transit/keys/<mybucketkey> exportable=true # transit engine
```

- * TLS authentication with custom certificates between Vault and CephObjectStore RGWs are supported from ceph v16.2.6 onwards
* `tokenSecretName` can be (and often will be) the same for both kms and s3 configurations.
- * `AWS-SSE:S3` requires Ceph Quincy v17.2.3 or later.

## Deleting a CephObjectStore

12 changes: 0 additions & 12 deletions Documentation/CRDs/ceph-nfs-crd.md
@@ -194,15 +194,3 @@ the size of the cluster.
not always happen due to the Kubernetes scheduler.
* Workaround: It is safest to run only a single NFS server, but we do not limit this if it
benefits your use case.

- ### Ceph v17.2.1
-
- * Ceph NFS management with the Rook mgr module enabled has a breaking regression with the Ceph
-   Quincy v17.2.1 release.
- * Workaround: Leave Ceph's Rook orchestrator mgr module disabled. If you have enabled it, you must
-   disable it using the snippet below from the toolbox.
-
- ```console
- ceph orch set backend ""
- ceph mgr module disable rook
- ```