Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Updates to testsuite according to cnf-testsuite/kubectl_client#17 #2221

Draft
wants to merge 8 commits into
base: main
Choose a base branch
from
12 changes: 6 additions & 6 deletions shard.lock
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ version: 2.0
shards:
cluster_tools:
git: https://github.com/cnf-testsuite/cluster_tools.git
version: 1.0.9
version: 1.0.0+git.commit.c586bd570757b94ea32f8219cbcedce71ef74b28

commander:
git: https://github.com/mrrooijen/commander.git
Expand All @@ -26,35 +26,35 @@ shards:

helm:
git: https://github.com/cnf-testsuite/helm.git
version: 1.0.3
version: 1.0.2+git.commit.06d2a643953f362de66adebb01b2b6bb309ba85c

icr:
git: https://github.com/crystal-community/icr.git
version: 0.9.0+git.commit.f62bfcdfbe65ee31c46c3d9951cee08ac2e0bee0

k8s_kernel_introspection:
git: https://github.com/cnf-testsuite/k8s_kernel_introspection.git
version: 1.0.3
version: 1.0.2+git.commit.540e8507742bf810ae40821d089ca69218d2b100

k8s_netstat:
git: https://github.com/cnf-testsuite/k8s_netstat.git
version: 1.0.1
version: 1.0.0+git.commit.0b8ea2af4cf5979e65aa1ec99eb83a32c4cd03d3

kernel_introspection:
git: https://github.com/cnf-testsuite/kernel_introspection.git
version: 0.1.0

kubectl_client:
git: https://github.com/cnf-testsuite/kubectl_client.git
version: 1.0.7
version: 1.0.6+git.commit.0ea49dea3450699eeb3dcdda698c6231d29d5d07

popcorn:
git: https://github.com/icyleaf/popcorn.git
version: 0.3.0

protobuf:
git: https://github.com/jeromegn/protobuf.cr.git
version: 2.3.0
version: 2.3.1

readline:
git: https://github.com/crystal-lang/crystal-readline.git
Expand Down
17 changes: 10 additions & 7 deletions shard.yml
Original file line number Diff line number Diff line change
Expand Up @@ -49,27 +49,30 @@ dependencies:
version: ~> 1.0.0
kubectl_client:
github: cnf-testsuite/kubectl_client
version: ~> 1.0.7
branch: rlal-refactor-k8sclient-module
# version: ~> 1.0.7
cluster_tools:
github: cnf-testsuite/cluster_tools
version: ~> 1.0.9
branch: rlal_change_kubectl_shard_ver
# version: ~> 1.0.9
kernel_introspection:
github: cnf-testsuite/kernel_introspection
version: ~> 0.1.0
k8s_kernel_introspection:
github: cnf-testsuite/k8s_kernel_introspection
version: ~> 1.0.3
branch: rlal_change_kubectl_shard_ver
# version: ~> 1.0.3
helm:
github: cnf-testsuite/helm
version: ~> 1.0.1
branch: rlal_change_kubectl_shard_ver
# version: ~> 1.0.1
k8s_netstat:
github: cnf-testsuite/k8s_netstat
version: ~> 1.0.1
branch: rlal_change_kubectl_shard_ver
# version: ~> 1.0.1
release_manager:
github: cnf-testsuite/release_manager
branch: main
retriable:
github: Sija/retriable.cr
protobuf:
github: jeromegn/protobuf.cr

Expand Down
4 changes: 2 additions & 2 deletions spec/5g/ran_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,11 @@ def setup_5g_network

# Run Helm install command for the 5G network
helm_chart_path = "sample-cnfs/sample_srsran_ueauth_open5gs/open5gs"
Helm.install("open5gs #{helm_chart_path} -n oran --create-namespace")
Helm.install("open5gs", helm_chart_path, namespace: "oran", create_namespace: true)

deployment_names.each do |deployment_name|
# Wait for each deployment to be ready
ready = KubectlClient::Get.resource_wait_for_install("deployment", deployment_name, namespace: "oran")
ready = KubectlClient::Wait.resource_wait_for_install("deployment", deployment_name, namespace: "oran")
if !ready
stdout_failure "Could not set up the 5g network"
return false
Expand Down
4 changes: 2 additions & 2 deletions spec/cluster_setup_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ require "cluster_tools"

describe "Cluster Setup" do
it "'install_cluster_tools' should give a message if namespace does not exist", tags: ["cluster_setup"] do
KubectlClient::Delete.command("namespace #{ClusterTools.namespace}")
KubectlClient::Delete.resource("namespace", ClusterTools.namespace)
result = ShellCmd.run_testsuite("install_cluster_tools")
result[:status].success?.should be_false
(/Error: Namespace cnf-testsuite does not exist./ =~ result[:output]).should_not be_nil
Expand All @@ -19,7 +19,7 @@ describe "Cluster Setup" do
it "'install_cluster_tools' should give a message if namespace does not exist even after dependency installation", tags: ["cluster_setup"] do
result = ShellCmd.run_testsuite("setup")

KubectlClient::Delete.command("namespace #{ClusterTools.namespace}")
KubectlClient::Delete.resource("namespace", ClusterTools.namespace)

result = ShellCmd.run_testsuite("install_cluster_tools")
result[:status].success?.should be_false
Expand Down
13 changes: 6 additions & 7 deletions spec/platform/observability_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ describe "Platform Observability" do
result = ShellCmd.run("#{helm} repo add prometheus-community https://prometheus-community.github.io/helm-charts")
result = ShellCmd.run("#{helm} repo update")
result = ShellCmd.run("#{helm} -n #{TESTSUITE_NAMESPACE} install --version 5.3.0 kube-state-metrics prometheus-community/kube-state-metrics", force_output: true)
KubectlClient::Get.wait_for_install("kube-state-metrics", namespace: TESTSUITE_NAMESPACE)
KubectlClient::Wait.resource_wait_for_install("deployment", "kube-state-metrics", namespace: TESTSUITE_NAMESPACE)

result = ShellCmd.run_testsuite("platform:kube_state_metrics poc")
(/(PASSED).*(Your platform is using the).*(release for kube state metrics)/ =~ result[:output]).should_not be_nil
Expand All @@ -33,8 +33,7 @@ describe "Platform Observability" do
result = ShellCmd.run("#{helm} install -n #{TESTSUITE_NAMESPACE} node-exporter prometheus-community/prometheus-node-exporter", force_output: true)

repeat_with_timeout(timeout: POD_READINESS_TIMEOUT, errormsg: "Pod readiness has timed-out") do
pod_ready = KubectlClient::Get.pod_status("node-exporter-prometheus", namespace: TESTSUITE_NAMESPACE).split(",")[2] == "true"
Log.info { "Pod Ready Status: #{pod_ready}" }
pod_ready = KubectlClient::Get.pod_ready?("node-exporter-prometheus", namespace: TESTSUITE_NAMESPACE)
pod_ready
end
result = ShellCmd.run_testsuite("platform:node_exporter poc")
Expand All @@ -53,12 +52,12 @@ describe "Platform Observability" do
helm = Helm::BinarySingleton.helm
begin
Helm.helm_repo_add("prometheus-community","https://prometheus-community.github.io/helm-charts")
result = Helm.install("-n #{TESTSUITE_NAMESPACE} prometheus-adapter prometheus-community/prometheus-adapter")
result = Helm.install("prometheus-adapter", "prometheus-community/prometheus-adapter", namespace: TESTSUITE_NAMESPACE)
Log.info { "Prometheus installed" }
rescue e : Helm::CannotReuseReleaseNameError
Log.info { "Prometheus already installed" }
end
KubectlClient::Get.wait_for_install("prometheus-adapter", namespace: TESTSUITE_NAMESPACE)
KubectlClient::Wait.resource_wait_for_install("deployment", "prometheus-adapter", namespace: TESTSUITE_NAMESPACE)

result = ShellCmd.run_testsuite("platform:prometheus_adapter poc")
(/(PASSED).*(Your platform is using the prometheus adapter)/ =~ result[:output]).should_not be_nil
Expand All @@ -71,13 +70,13 @@ describe "Platform Observability" do
helm = Helm::BinarySingleton.helm
begin
Helm.helm_repo_add("metrics-server","https://kubernetes-sigs.github.io/metrics-server/")
result = Helm.install("-n #{TESTSUITE_NAMESPACE} metrics-server -f spec/fixtures/metrics_values.yml metrics-server/metrics-server")
result = Helm.install("metrics-server", "metrics-server/metrics-server", namespace: TESTSUITE_NAMESPACE, values: "--values spec/fixtures/metrics_values.yml")
Log.info { "Metrics Server installed" }
rescue e : Helm::CannotReuseReleaseNameError
Log.info { "Metrics Server already installed" }
end
Log.info { result }
KubectlClient::Get.wait_for_install(deployment_name: "metrics-server", namespace: TESTSUITE_NAMESPACE)
KubectlClient::Wait.resource_wait_for_install("deployment", "metrics-server", namespace: TESTSUITE_NAMESPACE)
result = ShellCmd.run_testsuite("platform:metrics_server poc")
(/(PASSED).*(Your platform is using the metrics server)/ =~ result[:output]).should_not be_nil
ensure
Expand Down
6 changes: 3 additions & 3 deletions spec/platform/security_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -23,13 +23,13 @@ describe "Platform" do

it "'helm_tiller' should fail if Helm Tiller is running in the cluster", tags: ["platform:security"] do
ShellCmd.run("kubectl run tiller --image=rancher/tiller:v2.11.0", "create_tiller")
KubectlClient::Get.resource_wait_for_install("pod", "tiller")
KubectlClient::Wait.resource_wait_for_install("pod", "tiller")
result = ShellCmd.run_testsuite("platform:helm_tiller")
result[:status].success?.should be_true
(/(FAILED).*(Containers with the Helm Tiller image are running)/ =~ result[:output]).should_not be_nil
ensure
KubectlClient::Delete.command("pod/tiller")
KubectlClient::Get.resource_wait_for_uninstall("pod", "tiller")
KubectlClient::Delete.resource("pod", "tiller")
KubectlClient::Wait.resource_wait_for_uninstall("pod", "tiller")
end

it "'helm_tiller' should fail if Helm Tiller is running in the cluster", tags: ["platform:security"] do
Expand Down
4 changes: 2 additions & 2 deletions spec/utils/utils_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -84,12 +84,12 @@ describe "Utils" do
Log.info { "white_list_container_names #{white_list_container_names.inspect}" }
violation_list = [] of String
resource_response = CNFManager.workload_resource_test(args, config) do |resource, container, initialized|
privileged_list = KubectlClient::Get.privileged_containers
privileged_list = KubectlClient::Get.privileged_containers.dig("name").as_a.uniq
resource_containers = KubectlClient::Get.resource_containers(resource["kind"],resource["name"],resource["namespace"])
resource_containers_list = (JSON.parse(resource_containers.to_json).as_a).map { |element| element["name"] }
# Only check the containers that are in the deployed helm chart or manifest
(privileged_list & (resource_containers_list - white_list_container_names)).each do |x|
violation_list << x
violation_list << x.as_s
end
if violation_list.size > 0
false
Expand Down
4 changes: 2 additions & 2 deletions spec/workload/configuration_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,7 @@ describe CnfTestSuite do
(/(FAILED).*(Resources are created in the default namespace)/ =~ result[:output]).should_not be_nil
ensure
result = ShellCmd.cnf_uninstall()
KubectlClient::Utils.wait_for_terminations()
KubectlClient::Wait.wait_for_terminations()
end
end

Expand All @@ -331,7 +331,7 @@ describe CnfTestSuite do
(/(PASSED).*(default namespace is not being used)/ =~ result[:output]).should_not be_nil
ensure
result = ShellCmd.cnf_uninstall()
KubectlClient::Utils.wait_for_terminations()
KubectlClient::Wait.wait_for_terminations()
end
end

Expand Down
14 changes: 7 additions & 7 deletions spec/workload/microservice_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -61,18 +61,18 @@
ensure
result = ShellCmd.cnf_uninstall()
result[:status].success?.should be_true
KubectlClient::Delete.command("pvc data-test-mariadb-0 -n wordpress")
KubectlClient::Delete.resource("pvc", "data-test-mariadb-0", "wordpress")
end
end

it "'shared_database' should pass if two services on the cluster connect to the same database but they are not in the helm chart of the cnf", tags: ["shared_database"] do
begin
ShellCmd.cnf_install("cnf-path=sample-cnfs/sample_coredns")
KubectlClient::Create.namespace(DEFAULT_CNF_NAMESPACE)
KubectlClient::Apply.namespace(DEFAULT_CNF_NAMESPACE)
ShellCmd.run("kubectl label namespace #{DEFAULT_CNF_NAMESPACE} pod-security.kubernetes.io/enforce=privileged", "Label.namespace")
Helm.install("-n #{DEFAULT_CNF_NAMESPACE} multi-db sample-cnfs/ndn-multi-db-connections-fail/wordpress/")
KubectlClient::Get.resource_wait_for_install(kind: "Deployment", resource_name: "multi-db-wordpress", wait_count: 180, namespace: DEFAULT_CNF_NAMESPACE)
KubectlClient::Get.resource_wait_for_install(kind: "Deployment", resource_name: "multi-db-wordpress2", wait_count: 180, namespace: DEFAULT_CNF_NAMESPACE)
Helm.install("multi-db", "sample-cnfs/ndn-multi-db-connections-fail/wordpress/", DEFAULT_CNF_NAMESPACE)
KubectlClient::Wait.resource_wait_for_install(kind: "Deployment", resource_name: "multi-db-wordpress", wait_count: 180, namespace: DEFAULT_CNF_NAMESPACE)
KubectlClient::Wait.resource_wait_for_install(kind: "Deployment", resource_name: "multi-db-wordpress2", wait_count: 180, namespace: DEFAULT_CNF_NAMESPACE)
# todo kubectl apply of all resources in ndn-multi-db-connections-fail
# todo cnf_install of coredns
# todo run shared_database (should pass)
Expand All @@ -82,8 +82,8 @@
result[:status].success?.should be_true
(/(PASSED).*(No shared database found)/ =~ result[:output]).should_not be_nil
ensure
Helm.delete("multi-db -n #{DEFAULT_CNF_NAMESPACE}")
KubectlClient::Delete.command("pvc data-multi-db-mariadb-0")
Helm.uninstall("multi-db", DEFAULT_CNF_NAMESPACE)
KubectlClient::Delete.resource("pvc", "data-multi-db-mariadb-0")
result = ShellCmd.cnf_uninstall()
result[:status].success?.should be_true
end
Expand Down Expand Up @@ -276,7 +276,7 @@

ShellCmd.cnf_install("cnf-path=./sample-cnfs/sample_good_zombie_handling/")
result = ShellCmd.run_testsuite("zombie_handled verbose")
result[:status].success?.should be_true

Check failure on line 279 in spec/workload/microservice_spec.cr

View workflow job for this annotation

GitHub Actions / Chaos & Oran Tests (zombie)

got: false
(/(PASSED).*(Zombie handled)/ =~ result[:output]).should_not be_nil
ensure
result = ShellCmd.cnf_uninstall()
Expand All @@ -288,7 +288,7 @@

ShellCmd.cnf_install("cnf-path=./sample-cnfs/sample-bad-zombie/")
result = ShellCmd.run_testsuite("zombie_handled verbose")
result[:status].success?.should be_true

Check failure on line 291 in spec/workload/microservice_spec.cr

View workflow job for this annotation

GitHub Actions / Chaos & Oran Tests (zombie)

got: false
(/(FAILED).*(Zombie not handled)/ =~ result[:output]).should_not be_nil
ensure
result = ShellCmd.cnf_uninstall()
Expand Down
12 changes: 6 additions & 6 deletions spec/workload/observability_spec.cr
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ describe "Observability" do
install_cmd = "#{helm} install -n #{TESTSUITE_NAMESPACE} --set alertmanager.persistentVolume.enabled=false --set server.persistentVolume.enabled=false --set pushgateway.persistentVolume.enabled=false prometheus prometheus-community/prometheus"
ShellCmd.run(install_cmd, "helm_install_prometheus", force_output: true)

KubectlClient::Get.wait_for_install("prometheus-server", namespace: TESTSUITE_NAMESPACE)
KubectlClient::Wait.resource_wait_for_install("deployment", "prometheus-server", namespace: TESTSUITE_NAMESPACE)
ShellCmd.run("kubectl describe deployment prometheus-server -n #{TESTSUITE_NAMESPACE}", "k8s_describe_prometheus", force_output: true)

test_result = ShellCmd.run_testsuite("prometheus_traffic")
Expand Down Expand Up @@ -73,7 +73,7 @@ describe "Observability" do
helm = Helm::BinarySingleton.helm
result = ShellCmd.run("helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", force_output: true)
result = ShellCmd.run("#{helm} install -n #{TESTSUITE_NAMESPACE} --set alertmanager.persistentVolume.enabled=false --set server.persistentVolume.enabled=false --set pushgateway.persistentVolume.enabled=false prometheus prometheus-community/prometheus", force_output: true)
KubectlClient::Get.wait_for_install("prometheus-server", namespace: TESTSUITE_NAMESPACE)
KubectlClient::Wait.resource_wait_for_install("deployment", "prometheus-server", namespace: TESTSUITE_NAMESPACE)
result = ShellCmd.run("kubectl describe deployment prometheus-server", force_output: true)
#todo logging on prometheus pod

Expand All @@ -91,7 +91,7 @@ describe "Observability" do
Log.info { "Installing prometheus server" }
helm = Helm::BinarySingleton.helm
result = ShellCmd.run("#{helm} install -n #{TESTSUITE_NAMESPACE} --set alertmanager.persistentVolume.enabled=false --set server.persistentVolume.enabled=false --set pushgateway.persistentVolume.enabled=false prometheus prometheus-community/prometheus", force_output: true)
KubectlClient::Get.wait_for_install("prometheus-server", namespace: TESTSUITE_NAMESPACE)
KubectlClient::Wait.resource_wait_for_install("deployment", "prometheus-server", namespace: TESTSUITE_NAMESPACE)
result = ShellCmd.run("kubectl describe deployment prometheus-server -n #{TESTSUITE_NAMESPACE}", force_output: true)
#todo logging on prometheus pod

Expand All @@ -109,7 +109,7 @@ describe "Observability" do
Log.info { "Installing prometheus server" }
helm = Helm::BinarySingleton.helm
result = ShellCmd.run("#{helm} install -n #{TESTSUITE_NAMESPACE} --set alertmanager.persistentVolume.enabled=false --set server.persistentVolume.enabled=false --set pushgateway.persistentVolume.enabled=false prometheus prometheus-community/prometheus", force_output: true)
KubectlClient::Get.wait_for_install("prometheus-server", namespace: TESTSUITE_NAMESPACE)
KubectlClient::Wait.resource_wait_for_install("deployment", "prometheus-server", namespace: TESTSUITE_NAMESPACE)
result = ShellCmd.run("kubectl describe deployment prometheus-server -n #{TESTSUITE_NAMESPACE}", force_output: true)
#todo logging on prometheus pod

Expand Down Expand Up @@ -148,9 +148,9 @@ describe "Observability" do
ShellCmd.cnf_install("cnf-config=sample-cnfs/sample-coredns-cnf/cnf-testsuite.yml")
Helm.helm_repo_add("bitnami","https://charts.bitnami.com/bitnami")
#todo #helm install --values ./override.yml fluentd ./fluentd
Helm.install("--values ./spec/fixtures/fluentd-values-bad.yml -n #{TESTSUITE_NAMESPACE} fluentd bitnami/fluentd")
Helm.install("fluentd", "bitnami/fluentd", namespace: TESTSUITE_NAMESPACE, values: "--values ./spec/fixtures/fluentd-values-bad.yml")
Log.info { "Installing FluentD daemonset" }
KubectlClient::Get.resource_wait_for_install("Daemonset", "fluentd", namespace: TESTSUITE_NAMESPACE)
KubectlClient::Wait.resource_wait_for_install("Daemonset", "fluentd", namespace: TESTSUITE_NAMESPACE)

result = ShellCmd.run_testsuite("routed_logs")
(/(FAILED).*(Your CNF's logs are not being captured)/ =~ result[:output]).should_not be_nil
Expand Down
Loading
Loading