diff --git a/test/addons/rbd-mirror/start b/test/addons/rbd-mirror/start index 55b6192ff..79894b36c 100755 --- a/test/addons/rbd-mirror/start +++ b/test/addons/rbd-mirror/start @@ -18,7 +18,24 @@ from drenv import ceph from drenv import commands from drenv import kubectl -POOL_NAME = "replicapool" +POOL_NAMES = ["replicapool", "replicapool-2"] +VOLUME_REPLICATION_CLASSES = ["vrc-sample", "vrc-sample-2"] +VOLUME_GROUP_REPLICATION_CLASSES = ["vgrc-sample", "vgrc-sample-2"] + +STORAGE_CLASSES = [ + { + "name": "rook-ceph-block", + "pool": POOL_NAMES[0], + "vrc": VOLUME_REPLICATION_CLASSES[0], + "vgrc": VOLUME_GROUP_REPLICATION_CLASSES[0], + }, + { + "name": "rook-ceph-block-2", + "pool": POOL_NAMES[1], + "vrc": VOLUME_REPLICATION_CLASSES[1], + "vgrc": VOLUME_GROUP_REPLICATION_CLASSES[1], + }, +] def log_blocklist(cluster): @@ -33,7 +50,7 @@ def fetch_secret_info(cluster): print(f"Getting mirroring info site name for cluster '{cluster}'") info["name"] = drenv.wait_for( - f"cephblockpools.ceph.rook.io/{POOL_NAME}", + f"cephblockpools.ceph.rook.io/{POOL_NAMES[0]}", output="jsonpath={.status.mirroringInfo.site_name}", namespace="rook-ceph", profile=cluster, @@ -42,7 +59,7 @@ def fetch_secret_info(cluster): print(f"Getting rbd mirror boostrap peer secret name for cluster '{cluster}'") secret_name = kubectl.get( "cephblockpools.ceph.rook.io", - POOL_NAME, + POOL_NAMES[0], "--output=jsonpath={.status.info.rbdMirrorBootstrapPeerSecretName}", "--namespace=rook-ceph", context=cluster, @@ -58,7 +75,7 @@ def fetch_secret_info(cluster): ) # Must be encoded as base64 in secret .data section. 
- info["pool"] = base64.b64encode(POOL_NAME.encode()).decode() + info["pool"] = base64.b64encode(POOL_NAMES[0].encode()).decode() return info @@ -99,21 +116,29 @@ def configure_rbd_mirroring(cluster, peer_info): patch = {"spec": {"mirroring": {"peers": {"secretNames": [peer_info["name"]]}}}} kubectl.patch( "cephblockpool", - POOL_NAME, + POOL_NAMES[0], "--type=merge", f"--patch={json.dumps(patch)}", "--namespace=rook-ceph", context=cluster, ) - print("Creating VolumeReplicationClass") - template = drenv.template("start-data/vrc-sample.yaml") - yaml = template.substitute(cluster=cluster) - kubectl.apply("--filename=-", input=yaml, context=cluster) - - template = drenv.template("start-data/vgrc-sample.yaml") - yaml = template.substitute(cluster=cluster, pool=POOL_NAME) - kubectl.apply("--filename=-", input=yaml, context=cluster) + for storage_class in STORAGE_CLASSES: + print("Creating VolumeReplicationClass") + template = drenv.template("start-data/vrc-sample.yaml") + yaml = template.substitute( + cluster=cluster, vrcname=storage_class["vrc"], scname=storage_class["name"] + ) + kubectl.apply("--filename=-", input=yaml, context=cluster) + + template = drenv.template("start-data/vgrc-sample.yaml") + yaml = template.substitute( + cluster=cluster, + pool=storage_class["pool"], + vgrcname=storage_class["vgrc"], + scname=storage_class["name"], + ) + kubectl.apply("--filename=-", input=yaml, context=cluster) print(f"Apply rbd mirror to cluster '{cluster}'") kubectl.apply("--kustomize=start-data", context=cluster) @@ -163,7 +188,7 @@ def wait_until_pool_mirroring_is_healthy(cluster, attempts=3): status = kubectl.get( "cephblockpools.ceph.rook.io", - POOL_NAME, + POOL_NAMES[0], "--output=jsonpath={.status}", "--namespace=rook-ceph", context=cluster, @@ -183,7 +208,7 @@ def watch_pool_mirroring_status(cluster, timeout=180): while True: remaining = deadline - time.monotonic() watcher = kubectl.watch( - f"cephblockpool/{POOL_NAME}", + f"cephblockpool/{POOL_NAMES[0]}", 
jsonpath="{.status.mirroringStatus.summary}", namespace="rook-ceph", timeout=remaining, diff --git a/test/addons/rbd-mirror/start-data/vgrc-sample.yaml b/test/addons/rbd-mirror/start-data/vgrc-sample.yaml index 2f44aa193..a6c3feabe 100644 --- a/test/addons/rbd-mirror/start-data/vgrc-sample.yaml +++ b/test/addons/rbd-mirror/start-data/vgrc-sample.yaml @@ -5,10 +5,10 @@ apiVersion: replication.storage.openshift.io/v1alpha1 kind: VolumeGroupReplicationClass metadata: - name: vgrc-sample + name: $vgrcname labels: - ramendr.openshift.io/storageid: rook-ceph-$cluster-1 - ramendr.openshift.io/replicationid: rook-ceph-replication-1 + ramendr.openshift.io/storageid: $scname-$cluster-1 + ramendr.openshift.io/replicationid: $scname-replication-1 spec: provisioner: rook-ceph.rbd.csi.ceph.com parameters: diff --git a/test/addons/rbd-mirror/start-data/vrc-sample.yaml b/test/addons/rbd-mirror/start-data/vrc-sample.yaml index bfdcd1395..ef710fbf6 100644 --- a/test/addons/rbd-mirror/start-data/vrc-sample.yaml +++ b/test/addons/rbd-mirror/start-data/vrc-sample.yaml @@ -5,10 +5,10 @@ apiVersion: replication.storage.openshift.io/v1alpha1 kind: VolumeReplicationClass metadata: - name: vrc-sample + name: $vrcname labels: - ramendr.openshift.io/storageid: rook-ceph-$cluster-1 - ramendr.openshift.io/replicationid: rook-ceph-replication-1 + ramendr.openshift.io/storageid: $scname-$cluster-1 + ramendr.openshift.io/replicationid: $scname-replication-1 spec: provisioner: rook-ceph.rbd.csi.ceph.com parameters: diff --git a/test/addons/rook-cephfs/snapshot-class.yaml b/test/addons/rook-cephfs/snapshot-class.yaml index 7d2161b4e..23c744a74 100644 --- a/test/addons/rook-cephfs/snapshot-class.yaml +++ b/test/addons/rook-cephfs/snapshot-class.yaml @@ -11,9 +11,9 @@ deletionPolicy: Delete driver: rook-ceph.cephfs.csi.ceph.com kind: VolumeSnapshotClass metadata: - name: csi-cephfsplugin-snapclass + name: $vscname labels: - ramendr.openshift.io/storageid: rook-cephfs-$cluster-1 + 
ramendr.openshift.io/storageid: rook-cephfs-$fsname-$cluster-1 parameters: clusterID: rook-ceph csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner diff --git a/test/addons/rook-cephfs/start b/test/addons/rook-cephfs/start index 8dc750ec6..653972e4e 100755 --- a/test/addons/rook-cephfs/start +++ b/test/addons/rook-cephfs/start @@ -9,40 +9,48 @@ import sys import drenv from drenv import kubectl -FILE_SYSTEMS = ["test-fs1", "test-fs2"] +FS_NAMES = ["test-fs1", "test-fs2"] +VOLUME_SNAPSHOT_CLASSES = ["csi-cephfsplugin-snapclass", "csi-cephfsplugin-snapclass-2"] + +FILE_SYSTEMS = [ + {"name": FS_NAMES[0], "vsc": VOLUME_SNAPSHOT_CLASSES[0]}, + {"name": FS_NAMES[1], "vsc": VOLUME_SNAPSHOT_CLASSES[1]}, +] def deploy(cluster): for file_system in FILE_SYSTEMS: - print("Creating CephFS instance") + print("Creating CephFS instances") template = drenv.template("filesystem.yaml") - yaml = template.substitute(cluster=cluster, name=file_system) + yaml = template.substitute(cluster=cluster, name=file_system["name"]) kubectl.apply("--filename=-", input=yaml, context=cluster) - print("Creating StorageClass") + print("Creating StorageClasses") template = drenv.template("storage-class.yaml") - yaml = template.substitute(cluster=cluster, fsname=file_system) + yaml = template.substitute(cluster=cluster, fsname=file_system["name"]) kubectl.apply("--filename=-", input=yaml, context=cluster) - print("Creating SnapshotClass") - template = drenv.template("snapshot-class.yaml") - yaml = template.substitute(cluster=cluster) - kubectl.apply("--filename=-", input=yaml, context=cluster) + print("Creating SnapshotClasses") + template = drenv.template("snapshot-class.yaml") + yaml = template.substitute( + cluster=cluster, vscname=file_system["vsc"], fsname=file_system["name"] + ) + kubectl.apply("--filename=-", input=yaml, context=cluster) def wait(cluster): print("Waiting until Ceph File Systems are ready") - for file_system in FILE_SYSTEMS: + for fs_name in FS_NAMES: 
drenv.wait_for( - f"cephfilesystem/{file_system}", + f"cephfilesystem/{fs_name}", output="jsonpath={.status.phase}", namespace="rook-ceph", timeout=120, profile=cluster, ) kubectl.wait( - f"cephfilesystem/{file_system}", + f"cephfilesystem/{fs_name}", "--for=jsonpath={.status.phase}=Ready", "--namespace=rook-ceph", "--timeout=300s", diff --git a/test/addons/rook-cephfs/storage-class.yaml b/test/addons/rook-cephfs/storage-class.yaml index 1fb3028fc..97dd7a5ff 100644 --- a/test/addons/rook-cephfs/storage-class.yaml +++ b/test/addons/rook-cephfs/storage-class.yaml @@ -11,7 +11,7 @@ kind: StorageClass metadata: name: rook-cephfs-$fsname labels: - ramendr.openshift.io/storageid: rook-cephfs-$cluster-1 + ramendr.openshift.io/storageid: rook-cephfs-$fsname-$cluster-1 provisioner: rook-ceph.cephfs.csi.ceph.com parameters: clusterID: rook-ceph diff --git a/test/addons/rook-pool/snapshot-class.yaml b/test/addons/rook-pool/snapshot-class.yaml index 0b188b700..dfe0d5224 100644 --- a/test/addons/rook-pool/snapshot-class.yaml +++ b/test/addons/rook-pool/snapshot-class.yaml @@ -7,9 +7,9 @@ apiVersion: snapshot.storage.k8s.io/v1 kind: VolumeSnapshotClass metadata: - name: csi-rbdplugin-snapclass + name: $vscname labels: - ramendr.openshift.io/storageid: rook-ceph-$cluster-1 + ramendr.openshift.io/storageid: $scname-$cluster-1 driver: rook-ceph.rbd.csi.ceph.com parameters: clusterID: rook-ceph diff --git a/test/addons/rook-pool/start b/test/addons/rook-pool/start index 53645b1e3..c7283debd 100755 --- a/test/addons/rook-pool/start +++ b/test/addons/rook-pool/start @@ -13,14 +13,29 @@ import drenv from drenv import kubectl POOL_NAMES = ["replicapool", "replicapool-2"] +VOLUME_SNAPSHOT_CLASSES = ["csi-rbdplugin-snapclass", "csi-rbdplugin-snapclass-2"] def deploy(cluster): storage_classes = [ - {"name": "rook-ceph-block", "pool": POOL_NAMES[0]}, - {"name": "rook-ceph-block-2", "pool": POOL_NAMES[1]}, + { + "name": "rook-ceph-block", + "pool": POOL_NAMES[0], + "vsc": 
VOLUME_SNAPSHOT_CLASSES[0], + }, + { + "name": "rook-ceph-block-2", + "pool": POOL_NAMES[1], + "vsc": VOLUME_SNAPSHOT_CLASSES[1], + }, ] + print("Creating RBD pools") + for pool in POOL_NAMES: + template = drenv.template("replica-pool.yaml") + yaml = template.substitute(cluster=cluster, name=pool) + kubectl.apply("--filename=-", input=yaml, context=cluster) + print("Creating StorageClasses") for storage_class in storage_classes: template = drenv.template("storage-class.yaml") @@ -29,17 +44,15 @@ def deploy(cluster): ) kubectl.apply("--filename=-", input=yaml, context=cluster) - print("Creating RBD pools") - for pool in POOL_NAMES: - template = drenv.template("replica-pool.yaml") - yaml = template.substitute(cluster=cluster, name=pool) + print("Creating SnapshotClasses") + template = drenv.template("snapshot-class.yaml") + yaml = template.substitute( + cluster=cluster, + vscname=storage_class["vsc"], + scname=storage_class["name"], + ) kubectl.apply("--filename=-", input=yaml, context=cluster) - print("Creating SnapshotClass") - template = drenv.template("snapshot-class.yaml") - yaml = template.substitute(cluster=cluster) - kubectl.apply("--filename=-", input=yaml, context=cluster) - def wait(cluster): print("Waiting until ceph block pool is ready") diff --git a/test/addons/rook-pool/storage-class.yaml b/test/addons/rook-pool/storage-class.yaml index 6ab40e0f8..3a1e3488c 100644 --- a/test/addons/rook-pool/storage-class.yaml +++ b/test/addons/rook-pool/storage-class.yaml @@ -7,7 +7,7 @@ kind: StorageClass metadata: name: $name labels: - ramendr.openshift.io/storageid: rook-ceph-$cluster-1 + ramendr.openshift.io/storageid: $name-$cluster-1 provisioner: rook-ceph.rbd.csi.ceph.com parameters: clusterID: rook-ceph