Diffstat (limited to 'kud/tests')
-rw-r--r--  kud/tests/_common.sh            4
-rwxr-xr-x  kud/tests/_functions.sh         5
-rwxr-xr-x  kud/tests/emco.sh             122
-rw-r--r--  kud/tests/kata-clh.yml         26
-rw-r--r--  kud/tests/kata-qemu.yml        26
-rwxr-xr-x  kud/tests/kata.sh              47
-rwxr-xr-x  kud/tests/multus.sh             4
-rwxr-xr-x  kud/tests/ovn4nfv.sh            4
-rwxr-xr-x  kud/tests/qat.sh                2
-rwxr-xr-x  kud/tests/sriov-network.sh    102
-rwxr-xr-x  kud/tests/sriov.sh              2
-rwxr-xr-x  kud/tests/topology-manager.sh   3
12 files changed, 306 insertions(+), 41 deletions(-)
diff --git a/kud/tests/_common.sh b/kud/tests/_common.sh
index b56972c8..ff975544 100644
--- a/kud/tests/_common.sh
+++ b/kud/tests/_common.sh
@@ -1108,8 +1108,8 @@ spec:
app: ovn4nfv
annotations:
k8s.v1.cni.cncf.io/networks: '[{ "name": "$ovn_multus_network_name"}]'
- k8s.plugin.opnfv.org/nfn-network: '{ "type": "ovn4nfv", "interface": [{ "name": "ovn-port-net", "interface": "net0" , "defaultGateway": "false"},
- { "name": "ovn-priv-net", "interface": "net1" , "defaultGateway": "false"}]}'
+ k8s.plugin.opnfv.org/nfn-network: '{ "type": "ovn4nfv", "interface": [{ "name": "ovn-port-net", "interface": "net2" , "defaultGateway": "false"},
+ { "name": "ovn-priv-net", "interface": "net3" , "defaultGateway": "false"}]}'
spec:
containers:
- name: $ovn4nfv_deployment_name
diff --git a/kud/tests/_functions.sh b/kud/tests/_functions.sh
index 720470eb..7a3e97ab 100755
--- a/kud/tests/_functions.sh
+++ b/kud/tests/_functions.sh
@@ -25,6 +25,11 @@ function print_msg {
echo -e "${RED} $msg ---------------------------------------${NC}"
}
+function ssh_cluster {
+ master_ip=$(kubectl cluster-info | grep "Kubernetes master" | awk -F '[:/]' '{print $4}')
+ ssh -o StrictHostKeyChecking=no ${master_ip} -- "$@"
+}
+
function get_ovn_central_address {
#Reuse OVN_CENTRAL_ADDRESS if available (bypassable by --force flag)
if [[ "${1:-}" != "--force" ]] && [[ -n "${OVN_CENTRAL_ADDRESS:-}" ]]; then
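The new ssh_cluster helper runs a command on the Kubernetes master node rather than on the local test runner, resolving the master's address from kubectl cluster-info. A minimal usage sketch, assuming passwordless SSH to the master (which the StrictHostKeyChecking=no option implies):

    # ask the master node, not the local machine, for its default-route interface
    master_iface=$(ssh_cluster route | grep 'default' | awk '{print $8}' | head -n 1)
    echo "master default interface: ${master_iface}"

This is the pattern the multus.sh changes below switch to when generating the macvlan and ipvlan CRDs.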
diff --git a/kud/tests/emco.sh b/kud/tests/emco.sh
index 2b8eab1e..7cc3ca33 100755
--- a/kud/tests/emco.sh
+++ b/kud/tests/emco.sh
@@ -1,19 +1,7 @@
#!/bin/bash
-# Copyright 2020 Intel Corporation, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
set -o errexit
set -o nounset
@@ -35,6 +23,7 @@ rsync_service_port=30441
rsync_service_host="$master_ip"
base_url_orchestrator=${base_url_orchestrator:-"http://$master_ip:30415/v2"}
base_url_clm=${base_url_clm:-"http://$master_ip:30461/v2"}
+base_url_dcm=${base_url_dcm:-"http://$master_ip:30477/v2"}
CSAR_DIR="/opt/csar"
csar_id="cb009bfe-bbee-11e8-9766-525400435678"
@@ -94,6 +83,40 @@ labeldata="$(cat<<EOF
EOF
)"
+admin_logical_cloud_name="lcadmin"
+admin_logical_cloud_data="$(cat << EOF
+{
+ "metadata" : {
+ "name": "${admin_logical_cloud_name}",
+ "description": "logical cloud description",
+ "userData1":"<user data>",
+ "userData2":"<user data>"
+ },
+ "spec" : {
+ "level": "0"
+ }
+}
+EOF
+)"
+
+lc_cluster_1_name="lc1-c1"
+cluster_1_data="$(cat << EOF
+{
+ "metadata" : {
+ "name": "${lc_cluster_1_name}",
+ "description": "logical cloud cluster 1 description",
+ "userData1":"<user data>",
+ "userData2":"<user data>"
+ },
+
+ "spec" : {
+ "cluster-provider": "${clusterprovidername}",
+ "cluster-name": "${clustername}",
+ "loadbalancer-ip" : "0.0.0.0"
+ }
+}
+EOF
+)"
# add the rsync controller entry
rsynccontrollername="rsync"
@@ -316,7 +340,7 @@ deployment_intent_group_data="$(cat <<EOF
"profile":"${collection_composite_profile_name}",
"version":"${release}",
"override-values":[],
- "logical-cloud":"unused_logical_cloud"
+ "logical-cloud":"${admin_logical_cloud_name}"
}
}
EOF
@@ -352,6 +376,8 @@ function createOrchestratorData {
print_msg "creating project entry"
call_api -d "${projectdata}" "${base_url_orchestrator}/projects"
+ createLogicalCloudData
+
print_msg "creating collection composite app entry"
call_api -d "${compositeapp_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps"
@@ -403,27 +429,30 @@ function deleteOrchestratorData {
print_msg "Begin deleteOrchestratorData"
- delete_resource "${base_url_orchestrator}/controllers/${rsynccontrollername}"
+ delete_resource_nox "${base_url_orchestrator}/controllers/${rsynccontrollername}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${prometheus_placement_intent_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${collectd_placement_intent_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${prometheus_placement_intent_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${collectd_placement_intent_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${prometheus_profile_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${collectd_profile_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${prometheus_profile_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${collectd_profile_name}"
delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${prometheus_app_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${prometheus_app_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${collectd_app_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${collectd_app_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}"
+
+ deleteLogicalCloud
+
delete_resource_nox "${base_url_orchestrator}/projects/${projectname}"
print_msg "deleteOrchestratorData done"
@@ -443,12 +472,28 @@ function createClmData {
function deleteClmData {
print_msg "begin deleteClmData"
- delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels/${labelname}"
+ delete_resource_nox "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels/${labelname}"
delete_resource_nox "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}"
- delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}"
+ delete_resource_nox "${base_url_clm}/cluster-providers/${clusterprovidername}"
print_msg "deleteClmData done"
}
+function createLogicalCloudData {
+ print_msg "creating logical cloud"
+ call_api -d "${admin_logical_cloud_data}" "${base_url_dcm}/projects/${projectname}/logical-clouds"
+ call_api -d "${cluster_1_data}" "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/cluster-references"
+}
+
+function getLogicalCloudData {
+ call_api_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}"
+ call_api_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/cluster-references/${lc_cluster_1_name}"
+}
+
+function deleteLogicalCloud {
+ delete_resource_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/cluster-references/${lc_cluster_1_name}"
+ delete_resource_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}"
+}
+
function createData {
createClmData
createOrchestratorData
@@ -460,13 +505,24 @@ function deleteData {
}
function instantiate {
+ call_api -d "{ }" "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/instantiate"
call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/approve"
- call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"
+    # instantiate may fail if the logical cloud is not yet instantiated, so retry
+ try=0
+ until call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"; do
+ if [[ $try -lt 10 ]]; then
+ sleep 1s
+ else
+ return 1
+ fi
+ try=$((try + 1))
+ done
+ return 0
}
-
function terminateOrchData {
call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/terminate"
+ call_api -d "{ }" "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/terminate"
}
function status {
@@ -479,13 +536,13 @@ function waitFor {
# Setup
-function setup {
+function setupEmcoTest {
install_deps
populate_CSAR_composite_app_helm "$csar_id"
}
function start {
- setup
+ setupEmcoTest
deleteData
print_msg "Before creating, deleting the data success"
createData
@@ -516,6 +573,7 @@ function usage {
if [[ "$#" -gt 0 ]] ; then
case "$1" in
+ "setup" ) setupEmcoTest ;;
"start" ) start ;;
"stop" ) stop ;;
"create" ) createData ;;
diff --git a/kud/tests/kata-clh.yml b/kud/tests/kata-clh.yml
new file mode 100644
index 00000000..6498213f
--- /dev/null
+++ b/kud/tests/kata-clh.yml
@@ -0,0 +1,26 @@
+---
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+kind: Pod
+apiVersion: v1
+metadata:
+ name: kata-clh
+spec:
+ runtimeClassName: kata-clh
+ containers:
+ - name: busybox
+ image: busybox
+ imagePullPolicy: Always
+    command: [ "sleep", "100000" ]
\ No newline at end of file
diff --git a/kud/tests/kata-qemu.yml b/kud/tests/kata-qemu.yml
new file mode 100644
index 00000000..d95748e2
--- /dev/null
+++ b/kud/tests/kata-qemu.yml
@@ -0,0 +1,26 @@
+---
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+kind: Pod
+apiVersion: v1
+metadata:
+ name: kata-qemu
+spec:
+ runtimeClassName: kata-qemu
+ containers:
+ - name: busybox
+ image: busybox
+ imagePullPolicy: Always
+    command: [ "sleep", "100000" ]
\ No newline at end of file
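
Both manifests select a Kata runtime through runtimeClassName, so the pods only schedule on clusters where the matching RuntimeClass objects are registered. A hedged pre-check sketch:

    # the kata-qemu and kata-clh classes must exist before applying these pods
    kubectl get runtimeclass kata-qemu kata-clh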
diff --git a/kud/tests/kata.sh b/kud/tests/kata.sh
new file mode 100755
index 00000000..f55d8cd3
--- /dev/null
+++ b/kud/tests/kata.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+#source _common_test.sh
+#source _common.sh
+#source _functions.sh
+
+kata_pods="kata-qemu kata-clh"
+
+function wait_for_pod {
+ status_phase=""
+ while [[ "$status_phase" != "Running" ]]; do
+ new_phase="$(kubectl get pods -o wide | grep ^$1 | awk '{print $3}')"
+ if [[ "$new_phase" != "$status_phase" ]]; then
+ status_phase="$new_phase"
+ fi
+ if [[ "$new_phase" == "Err"* ]]; then
+ exit 1
+ fi
+ sleep 2
+ done
+}
+
+for pod in ${kata_pods}; do
+ echo "Deploying ${pod} pod"
+ kubectl apply -f ${pod}.yml
+ wait_for_pod ${pod}
+ echo "Pod ${pod} deployed successfully"
+ kubectl delete -f ${pod}.yml
+done
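
The script resolves the pod manifests relative to the working directory, so it is meant to be run from kud/tests; a hedged invocation sketch:

    cd kud/tests
    ./kata.sh    # applies each Kata pod, polls until Running, then deletes it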
diff --git a/kud/tests/multus.sh b/kud/tests/multus.sh
index ad3a3909..4f94791f 100755
--- a/kud/tests/multus.sh
+++ b/kud/tests/multus.sh
@@ -41,7 +41,7 @@ NET
function generate_CRD_for_macvlan_cni {
local csar_id=$1
- local master_name=`route | grep 'default' | awk '{print $8}' |head -n 1`
+ local master_name=$(ssh_cluster route | grep 'default' | awk '{print $8}' |head -n 1)
_checks_args $csar_id
pushd ${CSAR_DIR}/${csar_id}
@@ -67,7 +67,7 @@ NET
function generate_CRD_for_ipvlan_cni {
local csar_id=$1
- local master_name=`route | grep 'default' | awk '{print $8}' |head -n 1`
+ local master_name=$(ssh_cluster route | grep 'default' | awk '{print $8}' |head -n 1)
_checks_args $csar_id
pushd ${CSAR_DIR}/${csar_id}
diff --git a/kud/tests/ovn4nfv.sh b/kud/tests/ovn4nfv.sh
index cd2664ad..e25c2f09 100755
--- a/kud/tests/ovn4nfv.sh
+++ b/kud/tests/ovn4nfv.sh
@@ -34,8 +34,8 @@ echo "===== $deployment_pod details ====="
kubectl exec -it $deployment_pod -- ip a
ovn_nic=$(kubectl exec -it $deployment_pod -- ip a )
-if [[ $ovn_nic != *"net1"* ]]; then
- echo "The $deployment_pod pod doesn't contain the net1 nic"
+if [[ $ovn_nic != *"net3"* ]]; then
+ echo "The $deployment_pod pod doesn't contain the net3 nic"
exit 1
else
echo "Test Completed!"
diff --git a/kud/tests/qat.sh b/kud/tests/qat.sh
index 8365f700..11fb6ca0 100755
--- a/kud/tests/qat.sh
+++ b/kud/tests/qat.sh
@@ -10,7 +10,7 @@
set -o pipefail
-qat_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.capacity."qat.intel.com/cy2_dc2">="1") | .metadata.name')
+qat_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."qat.intel.com/cy2_dc2"|tonumber)>=1) | .metadata.name')
if [ -z "$qat_capable_nodes" ]; then
echo "This test case cannot run. QAT device unavailable."
QAT_ENABLED=False
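
The jq change matters because capacity values are JSON strings and string comparison is lexicographic: "10" sorts before "2", so a node with ten devices could fail a string test that a numeric test would pass. Coercing with tonumber compares actual quantities. A quick sketch of the difference (hypothetical capacity value):

    echo '"10"' | jq '. >= "2"'             # false: lexicographic string comparison
    echo '"10"' | jq '(. | tonumber) >= 2'  # true: numeric comparison

The same correction is applied to sriov.sh below.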
diff --git a/kud/tests/sriov-network.sh b/kud/tests/sriov-network.sh
new file mode 100755
index 00000000..3191c2f3
--- /dev/null
+++ b/kud/tests/sriov-network.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o pipefail
+
+sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_nic"|tonumber)>=2) | .metadata.name')
+if [ -z "$sriov_capable_nodes" ]; then
+ echo "SRIOV test case cannot run on the cluster."
+ exit 0
+else
+    echo "SRIOV option available in the cluster."
+fi
+
+pod_name=pod-case-01
+
+function create_pod_yaml_with_single_VF {
+
+cat << POD > $HOME/$pod_name-single.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-case-01
+ annotations:
+ k8s.v1.cni.cncf.io/networks: sriov-intel
+spec:
+ containers:
+ - name: test-pod
+ image: docker.io/centos/tools:latest
+ command:
+ - /sbin/init
+ resources:
+ requests:
+ intel.com/intel_sriov_nic: '1'
+ limits:
+ intel.com/intel_sriov_nic: '1'
+POD
+}
+
+function create_pod_yaml_with_multiple_VF {
+
+cat << POD > $HOME/$pod_name-multiple.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-case-01
+ annotations:
+ k8s.v1.cni.cncf.io/networks: sriov-intel, sriov-intel
+spec:
+ containers:
+ - name: test-pod
+ image: docker.io/centos/tools:latest
+ command:
+ - /sbin/init
+ resources:
+ requests:
+ intel.com/intel_sriov_nic: '2'
+ limits:
+ intel.com/intel_sriov_nic: '2'
+POD
+}
+create_pod_yaml_with_single_VF
+create_pod_yaml_with_multiple_VF
+
+for podType in ${POD_TYPE:-single multiple}; do
+
+ kubectl delete pod $pod_name --ignore-not-found=true --now --wait
+ allocated_node_resource=$(kubectl describe node | grep "intel.com/intel_sriov_nic" | tail -n1 |awk '{print $(NF)}')
+
+ echo "The allocated resource of the node is: " $allocated_node_resource
+
+ kubectl create -f $HOME/$pod_name-$podType.yaml --validate=false
+
+ for pod in $pod_name; do
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $pod-$podType : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Running" ]]; then
+ echo "Pod is up and running.."
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ done
+ allocated_node_resource=$(kubectl describe node | grep "intel.com/intel_sriov_nic" | tail -n1 |awk '{print $(NF)}')
+
+ echo " The current resource allocation after the pod creation is: " $allocated_node_resource
+ kubectl delete pod $pod_name --now
+ echo "Test complete."
+
+done
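
The loop honors an optional POD_TYPE environment variable and defaults to running both VF cases; a hedged invocation sketch:

    POD_TYPE=single ./sriov-network.sh   # exercise only the single-VF pod
    ./sriov-network.sh                   # exercise the single-, then the multiple-VF pod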
diff --git a/kud/tests/sriov.sh b/kud/tests/sriov.sh
index e617ea62..7aa97f0c 100755
--- a/kud/tests/sriov.sh
+++ b/kud/tests/sriov.sh
@@ -10,7 +10,7 @@
set -o pipefail
-sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.capacity."intel.com/intel_sriov_700">="2") | .metadata.name')
+sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_700"|tonumber)>=2) | .metadata.name')
if [ -z "$sriov_capable_nodes" ]; then
echo "SRIOV test case cannot run on the cluster."
exit 0
diff --git a/kud/tests/topology-manager.sh b/kud/tests/topology-manager.sh
index 7d434386..5c9f900d 100755
--- a/kud/tests/topology-manager.sh
+++ b/kud/tests/topology-manager.sh
@@ -15,7 +15,8 @@ set -o pipefail
source _common.sh
source _functions.sh
-if [ -z "$( lspci | grep "Ethernet Controller XL710" | head -n 1 | cut -d " " -f 8 )" ]; then
+adaptors="X710 XL710 X722"
+if [[ $(lspci | grep -c "Ethernet .* \(${adaptors// /\\|}\)") == "0" ]]; then
echo "Ethernet adaptor version is not set. Topology manager test case cannot run on this machine"
exit 0
else
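
The rewritten check builds a BRE alternation out of the space-separated adaptor list using bash pattern substitution, so any of the listed 700-series adaptors enables the test. A quick sketch of the expansion (values from the script):

    adaptors="X710 XL710 X722"
    echo "Ethernet .* \(${adaptors// /\\|}\)"
    # prints: Ethernet .* \(X710\|XL710\|X722\)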