Add New VRCs & VSCs, Fix Duplicate SC-ID Issue
This commit continues the improvements introduced in [1] and [2] by adding
new VolumeReplicationClasses (VRCs) and VolumeSnapshotClasses (VSCs) based
on the newly created StorageClasses (SCs). It also includes minor fixes to
prevent duplicate SC-ID generation.

References:
[1] RamenDR#1756
[2] RamenDR#1756

Signed-off-by: rakeshgm <rakeshgm@redhat.com>
(cherry picked from commit 538dc1d)
rakeshgm committed Feb 17, 2025
1 parent 0fb0c74 commit 752469d
Showing 7 changed files with 94 additions and 48 deletions.
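
The duplicate SC-ID fix comes from the relabeling visible in the template changes below: the VRC, VGRC, and VSC templates now take the StorageClass name as a parameter, so every rendered class gets its own storageid and replicationid. The following is a minimal sketch of the effect, not part of the commit; it assumes drenv.template wraps Python's string.Template (the $-style placeholders suggest this) and uses a hypothetical cluster name "dr1":

from string import Template

VRC_TEMPLATE = Template(
    """\
metadata:
  name: $vrcname
  labels:
    ramendr.openshift.io/storageid: $scname-$cluster-1
    ramendr.openshift.io/replicationid: $scname-replication-1
"""
)

STORAGE_CLASSES = [
    {"name": "rook-ceph-block", "vrc": "vrc-sample"},
    {"name": "rook-ceph-block-2", "vrc": "vrc-sample-2"},
]

for sc in STORAGE_CLASSES:
    # Each StorageClass now renders a distinct storageid and replicationid,
    # instead of every class sharing the same hard-coded label value.
    print(VRC_TEMPLATE.substitute(cluster="dr1", vrcname=sc["vrc"], scname=sc["name"]))

With the old hard-coded labels, both classes would have carried the same rook-ceph-block-dr1-1 storageid, which is what produced the duplicate SC-IDs.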
55 changes: 40 additions & 15 deletions test/addons/rbd-mirror/start
@@ -18,7 +18,24 @@ from drenv import ceph
 from drenv import commands
 from drenv import kubectl
 
-POOL_NAME = "replicapool"
+POOL_NAMES = ["replicapool", "replicapool-2"]
+VOLUME_REPLICATION_CLASSES = ["vrc-sample", "vrc-sample-2"]
+VOLUME_GROUP_REPLICATION_CLASSES = ["vgrc-sample", "vgrc-sample-2"]
+
+STORAGE_CLASSES = [
+    {
+        "name": "rook-ceph-block",
+        "pool": POOL_NAMES[0],
+        "vrc": VOLUME_REPLICATION_CLASSES[0],
+        "vgrc": VOLUME_GROUP_REPLICATION_CLASSES[0],
+    },
+    {
+        "name": "rook-ceph-block-2",
+        "pool": POOL_NAMES[1],
+        "vrc": VOLUME_REPLICATION_CLASSES[1],
+        "vgrc": VOLUME_GROUP_REPLICATION_CLASSES[1],
+    },
+]
 
 
 def log_blocklist(cluster):
@@ -33,7 +50,7 @@ def fetch_secret_info(cluster):
 
     print(f"Getting mirroring info site name for cluster '{cluster}'")
     info["name"] = drenv.wait_for(
-        f"cephblockpools.ceph.rook.io/{POOL_NAME}",
+        f"cephblockpools.ceph.rook.io/{POOL_NAMES[0]}",
         output="jsonpath={.status.mirroringInfo.site_name}",
         namespace="rook-ceph",
         profile=cluster,
@@ -42,7 +59,7 @@ def fetch_secret_info(cluster):
     print(f"Getting rbd mirror boostrap peer secret name for cluster '{cluster}'")
     secret_name = kubectl.get(
         "cephblockpools.ceph.rook.io",
-        POOL_NAME,
+        POOL_NAMES[0],
         "--output=jsonpath={.status.info.rbdMirrorBootstrapPeerSecretName}",
         "--namespace=rook-ceph",
         context=cluster,
@@ -58,7 +75,7 @@ def fetch_secret_info(cluster):
     )
 
     # Must be encoded as base64 in secret .data section.
-    info["pool"] = base64.b64encode(POOL_NAME.encode()).decode()
+    info["pool"] = base64.b64encode(POOL_NAMES[0].encode()).decode()
 
     return info
 
@@ -99,21 +116,29 @@ def configure_rbd_mirroring(cluster, peer_info):
     patch = {"spec": {"mirroring": {"peers": {"secretNames": [peer_info["name"]]}}}}
     kubectl.patch(
         "cephblockpool",
-        POOL_NAME,
+        POOL_NAMES[0],
         "--type=merge",
         f"--patch={json.dumps(patch)}",
         "--namespace=rook-ceph",
         context=cluster,
     )
 
-    print("Creating VolumeReplicationClass")
-    template = drenv.template("start-data/vrc-sample.yaml")
-    yaml = template.substitute(cluster=cluster)
-    kubectl.apply("--filename=-", input=yaml, context=cluster)
-
-    template = drenv.template("start-data/vgrc-sample.yaml")
-    yaml = template.substitute(cluster=cluster, pool=POOL_NAME)
-    kubectl.apply("--filename=-", input=yaml, context=cluster)
+    for storage_class in STORAGE_CLASSES:
+        print("Creating VolumeReplicationClass")
+        template = drenv.template("start-data/vrc-sample.yaml")
+        yaml = template.substitute(
+            cluster=cluster, vrcname=storage_class["vrc"], scname=storage_class["name"]
+        )
+        kubectl.apply("--filename=-", input=yaml, context=cluster)
+
+        template = drenv.template("start-data/vgrc-sample.yaml")
+        yaml = template.substitute(
+            cluster=cluster,
+            pool=storage_class["pool"],
+            vgrcname=storage_class["vgrc"],
+            scname=storage_class["name"],
+        )
+        kubectl.apply("--filename=-", input=yaml, context=cluster)
 
     print(f"Apply rbd mirror to cluster '{cluster}'")
     kubectl.apply("--kustomize=start-data", context=cluster)
@@ -163,7 +188,7 @@ def wait_until_pool_mirroring_is_healthy(cluster, attempts=3):
 
         status = kubectl.get(
             "cephblockpools.ceph.rook.io",
-            POOL_NAME,
+            POOL_NAMES[0],
             "--output=jsonpath={.status}",
             "--namespace=rook-ceph",
             context=cluster,
@@ -183,7 +208,7 @@ def watch_pool_mirroring_status(cluster, timeout=180):
     while True:
         remaining = deadline - time.monotonic()
         watcher = kubectl.watch(
-            f"cephblockpool/{POOL_NAME}",
+            f"cephblockpool/{POOL_NAMES[0]}",
             jsonpath="{.status.mirroringStatus.summary}",
             namespace="rook-ceph",
             timeout=remaining,
6 changes: 3 additions & 3 deletions test/addons/rbd-mirror/start-data/vgrc-sample.yaml
@@ -5,10 +5,10 @@
 apiVersion: replication.storage.openshift.io/v1alpha1
 kind: VolumeGroupReplicationClass
 metadata:
-  name: vgrc-sample
+  name: $vgrcname
   labels:
-    ramendr.openshift.io/storageid: rook-ceph-block-$cluster-1
-    ramendr.openshift.io/replicationid: rook-ceph-replication-1
+    ramendr.openshift.io/storageid: $scname-$cluster-1
+    ramendr.openshift.io/replicationid: $scname-replication-1
 spec:
   provisioner: rook-ceph.rbd.csi.ceph.com
   parameters:
6 changes: 3 additions & 3 deletions test/addons/rbd-mirror/start-data/vrc-sample.yaml
@@ -5,10 +5,10 @@
 apiVersion: replication.storage.openshift.io/v1alpha1
 kind: VolumeReplicationClass
 metadata:
-  name: vrc-sample
+  name: $vrcname
   labels:
-    ramendr.openshift.io/storageid: rook-ceph-block-$cluster-1
-    ramendr.openshift.io/replicationid: rook-ceph-replication-1
+    ramendr.openshift.io/storageid: $scname-$cluster-1
+    ramendr.openshift.io/replicationid: $scname-replication-1
 spec:
   provisioner: rook-ceph.rbd.csi.ceph.com
   parameters:
4 changes: 2 additions & 2 deletions test/addons/rook-cephfs/snapshot-class.yaml
@@ -11,9 +11,9 @@ deletionPolicy: Delete
 driver: rook-ceph.cephfs.csi.ceph.com
 kind: VolumeSnapshotClass
 metadata:
-  name: csi-cephfsplugin-snapclass
+  name: $vscname
   labels:
-    ramendr.openshift.io/storageid: rook-cephfs-test-fs1-$cluster-1
+    ramendr.openshift.io/storageid: rook-cephfs-$fsname-$cluster-1
 parameters:
   clusterID: rook-ceph
   csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner
32 changes: 20 additions & 12 deletions test/addons/rook-cephfs/start
@@ -9,40 +9,48 @@ import sys
 import drenv
 from drenv import kubectl
 
-FILE_SYSTEMS = ["test-fs1", "test-fs2"]
+FS_NAMES = ["test-fs1", "test-fs2"]
+VOLUME_SNAPSHOT_CLASSES = ["csi-cephfsplugin-snapclass", "csi-cephfsplugin-snapclass-2"]
+
+FILE_SYSTEMS = [
+    {"name": FS_NAMES[0], "vsc": VOLUME_SNAPSHOT_CLASSES[0]},
+    {"name": FS_NAMES[1], "vsc": VOLUME_SNAPSHOT_CLASSES[1]},
+]
 
 
 def deploy(cluster):
     for file_system in FILE_SYSTEMS:
-        print("Creating CephFS instance")
+        print("Creating CephFS instances")
         template = drenv.template("filesystem.yaml")
-        yaml = template.substitute(cluster=cluster, name=file_system)
+        yaml = template.substitute(cluster=cluster, name=file_system["name"])
         kubectl.apply("--filename=-", input=yaml, context=cluster)
 
-        print("Creating StorageClass")
+        print("Creating StorageClasses")
         template = drenv.template("storage-class.yaml")
-        yaml = template.substitute(cluster=cluster, fsname=file_system)
+        yaml = template.substitute(cluster=cluster, fsname=file_system["name"])
         kubectl.apply("--filename=-", input=yaml, context=cluster)
 
-    print("Creating SnapshotClass")
-    template = drenv.template("snapshot-class.yaml")
-    yaml = template.substitute(cluster=cluster)
-    kubectl.apply("--filename=-", input=yaml, context=cluster)
+        print("Creating SnapshotClasses")
+        template = drenv.template("snapshot-class.yaml")
+        yaml = template.substitute(
+            cluster=cluster, vscname=file_system["vsc"], fsname=file_system["name"]
+        )
+        kubectl.apply("--filename=-", input=yaml, context=cluster)
 
 
 def wait(cluster):
     print("Waiting until Ceph File Systems are ready")
 
-    for file_system in FILE_SYSTEMS:
+    for fs_name in FS_NAMES:
         drenv.wait_for(
-            f"cephfilesystem/{file_system}",
+            f"cephfilesystem/{fs_name}",
             output="jsonpath={.status.phase}",
             namespace="rook-ceph",
            timeout=120,
             profile=cluster,
         )
         kubectl.wait(
-            f"cephfilesystem/{file_system}",
+            f"cephfilesystem/{fs_name}",
             "--for=jsonpath={.status.phase}=Ready",
             "--namespace=rook-ceph",
             "--timeout=300s",
4 changes: 2 additions & 2 deletions test/addons/rook-pool/snapshot-class.yaml
@@ -7,9 +7,9 @@
 apiVersion: snapshot.storage.k8s.io/v1
 kind: VolumeSnapshotClass
 metadata:
-  name: csi-rbdplugin-snapclass
+  name: $vscname
   labels:
-    ramendr.openshift.io/storageid: rook-ceph-block-$cluster-1
+    ramendr.openshift.io/storageid: $scname-$cluster-1
 driver: rook-ceph.rbd.csi.ceph.com
 parameters:
   clusterID: rook-ceph
35 changes: 24 additions & 11 deletions test/addons/rook-pool/start
@@ -13,14 +13,29 @@ import drenv
 from drenv import kubectl
 
 POOL_NAMES = ["replicapool", "replicapool-2"]
+VOLUME_SNAPSHOT_CLASSES = ["csi-rbdplugin-snapclass", "csi-rbdplugin-snapclass-2"]
 
 
 def deploy(cluster):
     storage_classes = [
-        {"name": "rook-ceph-block", "pool": POOL_NAMES[0]},
-        {"name": "rook-ceph-block-2", "pool": POOL_NAMES[1]},
+        {
+            "name": "rook-ceph-block",
+            "pool": POOL_NAMES[0],
+            "vsc": VOLUME_SNAPSHOT_CLASSES[0],
+        },
+        {
+            "name": "rook-ceph-block-2",
+            "pool": POOL_NAMES[1],
+            "vsc": VOLUME_SNAPSHOT_CLASSES[1],
+        },
     ]
 
+    print("Creating RBD pools")
+    for pool in POOL_NAMES:
+        template = drenv.template("replica-pool.yaml")
+        yaml = template.substitute(cluster=cluster, name=pool)
+        kubectl.apply("--filename=-", input=yaml, context=cluster)
+
     print("Creating StorageClasses")
     for storage_class in storage_classes:
         template = drenv.template("storage-class.yaml")
@@ -29,17 +44,15 @@ def deploy(cluster):
         )
         kubectl.apply("--filename=-", input=yaml, context=cluster)
 
-    print("Creating RBD pools")
-    for pool in POOL_NAMES:
-        template = drenv.template("replica-pool.yaml")
-        yaml = template.substitute(cluster=cluster, name=pool)
+        print("Creating SnapshotClasses")
+        template = drenv.template("snapshot-class.yaml")
+        yaml = template.substitute(
+            cluster=cluster,
+            vscname=storage_class["vsc"],
+            scname=storage_class["name"],
+        )
         kubectl.apply("--filename=-", input=yaml, context=cluster)
 
-    print("Creating SnapshotClass")
-    template = drenv.template("snapshot-class.yaml")
-    yaml = template.substitute(cluster=cluster)
-    kubectl.apply("--filename=-", input=yaml, context=cluster)
-
 
 def wait(cluster):
     print("Waiting until ceph block pool is ready")
