-rw-r--r--  kud/deployment_infra/playbooks/configure-topology-manager.yml               66
-rw-r--r--  kud/deployment_infra/playbooks/kud-vars.yml                                   7
-rwxr-xr-x  kud/hosting_providers/vagrant/installer.sh                                    4
-rwxr-xr-x  kud/tests/topology-manager.sh                                               112
-rwxr-xr-x  kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml  4
-rwxr-xr-x  src/dcm/test/dcm_call_api.sh                                                 61
6 files changed, 208 insertions(+), 46 deletions(-)
diff --git a/kud/deployment_infra/playbooks/configure-topology-manager.yml b/kud/deployment_infra/playbooks/configure-topology-manager.yml
new file mode 100644
index 00000000..012bc8b0
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-topology-manager.yml
@@ -0,0 +1,66 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2020
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- hosts: kube-node
+  tasks:
+    - name: Load kud variables
+      include_vars:
+        file: kud-vars.yml
+
+    - name: Create kubelet config
+      become: yes
+      blockinfile:
+        path: "{{ kubernetes_config_file }}"
+        marker: "# {mark} OpenNESS configuration - General"
+        create: yes
+        block: |
+          featureGates:
+            TopologyManager: {{ topology_manager.policy != 'none' }}
+      notify:
+        - enable and restart kubelet
+
+    - name: Customize kubelet config - CPU Manager
+      become: yes
+      blockinfile:
+        path: "{{ kubernetes_config_file }}"
+        marker: "# {mark} OpenNESS configuration - CPU Manager"
+        block: |
+          cpuManagerPolicy: {{ cpu_manager.policy }}
+        state: "{{ 'present' if cpu_manager.policy == 'static' else 'absent' }}"
+      notify:
+        - remove cpu manager checkpoint file
+        - enable and restart kubelet
+
+    - name: Customize kubelet config - Topology Manager
+      become: yes
+      blockinfile:
+        path: "{{ kubernetes_config_file }}"
+        marker: "# {mark} OpenNESS configuration - Topology Manager"
+        block: |
+          topologyManagerPolicy: {{ topology_manager.policy }}
+        state: "{{ 'absent' if topology_manager.policy == 'none' else 'present' }}"
+      notify:
+        - enable and restart kubelet
+
+  handlers:
+    - name: enable and restart kubelet
+      become: yes
+      systemd:
+        name: kubelet
+        daemon_reload: yes
+        enabled: yes
+        masked: no
+        state: restarted
+
+    - name: remove cpu manager checkpoint file
+      become: yes
+      file:
+        path: "{{ cpu_manager.checkpoint_file }}"
+        state: absent
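For reference, with the kud-vars.yml defaults further down (cpu_manager.policy "static", topology_manager.policy "best-effort"), the three blockinfile tasks leave managed blocks like the following in /etc/kubernetes/kubelet-config.yaml. This is an illustrative sketch of the rendered output (blockinfile expands {mark} to BEGIN/END), not captured from a live node:

    # BEGIN OpenNESS configuration - General
    featureGates:
      TopologyManager: True
    # END OpenNESS configuration - General
    # BEGIN OpenNESS configuration - CPU Manager
    cpuManagerPolicy: static
    # END OpenNESS configuration - CPU Manager
    # BEGIN OpenNESS configuration - Topology Manager
    topologyManagerPolicy: best-effort
    # END OpenNESS configuration - Topology Manager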
diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
index 4988a473..30e54f03 100644
--- a/kud/deployment_infra/playbooks/kud-vars.yml
+++ b/kud/deployment_infra/playbooks/kud-vars.yml
@@ -79,3 +79,10 @@ optane_ipmctl_source_type: "tarball"
optane_ipmctl_version: 02.00.00.3474
optane_ipmctl_url: "https://launchpad.net/ubuntu/+archive/primary/+sourcefiles/ipmctl/{{ optane_package }}.tar.xz"
optane_ipmctl_package: ipmctl_02.00.00.3474+really01.00.00.3469.orig
+
+kubernetes_config_file: "/etc/kubernetes/kubelet-config.yaml"
+cpu_manager:
+  policy: "static"  # Options: none (disabled), static (default)
+  checkpoint_file: "/var/lib/kubelet/cpu_manager_state"
+topology_manager:
+  policy: "best-effort"  # Options: none (disabled), best-effort (default), restricted, single-numa-node
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
index 27ab7fc1..71e4d8b7 100755
--- a/kud/hosting_providers/vagrant/installer.sh
+++ b/kud/hosting_providers/vagrant/installer.sh
@@ -155,13 +155,13 @@ function install_addons {
    _install_ansible
    sudo ansible-galaxy install $verbose -r $kud_infra_folder/galaxy-requirements.yml --ignore-errors
    ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml | sudo tee $log_folder/setup-kud.log
-    for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat optane cmk}; do
+    for addon in ${KUD_ADDONS:-topology-manager virtlet ovn4nfv nfd sriov qat optane cmk}; do
        echo "Deploying $addon using configure-$addon.yml playbook.."
        ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-${addon}.yml | sudo tee $log_folder/setup-${addon}.log
    done
    echo "Run the test cases if testing_enabled is set to true."
    if [[ "${testing_enabled}" == "true" ]]; then
-        for addon in ${KUD_ADDONS:-multus virtlet ovn4nfv nfd sriov qat optane cmk}; do
+        for addon in ${KUD_ADDONS:-multus topology-manager virtlet ovn4nfv nfd sriov qat optane cmk}; do
            pushd $kud_tests
            bash ${addon}.sh
            popd
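Since both addon lists are shell default expansions, setting KUD_ADDONS replaces them entirely for the deploy and test loops, so defaults such as multus in the test loop are dropped unless restated. An illustrative direct run of the installer, assuming it is executed from its own directory as in the vagrant flow:

    KUD_ADDONS="multus topology-manager" ./installer.sh | tee kud_installer.log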
diff --git a/kud/tests/topology-manager.sh b/kud/tests/topology-manager.sh
new file mode 100755
index 00000000..bbffd4d7
--- /dev/null
+++ b/kud/tests/topology-manager.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2020
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source _common.sh
+source _functions.sh
+
+ethernet_adaptor_version=$( lspci | grep "Ethernet Controller XL710" | head -n 1 | cut -d " " -f 8 )
+if [ -z "$ethernet_adaptor_version" ]; then
+    echo "Ethernet adaptor version is not set. The topology manager test case cannot run on this machine"
+    exit 0
+else
+    echo "NIC card specs match. The topology manager option is available for this version."
+fi
+
+pod_name=pod-topology-manager
+csar_id=bd55cccc-bf34-11ea-b3de-0242ac130004
+
+function create_pod_yaml {
+    local csar_id=$1
+    _checks_args $csar_id
+    pushd ${CSAR_DIR}/${csar_id}
+
+    cat << POD > $pod_name.yaml
+kind: Pod
+apiVersion: v1
+metadata:
+  name: $pod_name
+  annotations:
+    k8s.v1.cni.cncf.io/networks: sriov-eno2
+spec:
+  containers:
+  - name: $pod_name
+    image: docker.io/centos/tools:latest
+    command:
+    - /sbin/init
+    resources:
+      limits:
+        cpu: "1"
+        memory: "500Mi"
+        intel.com/intel_sriov_700: '1'
+      requests:
+        cpu: "1"
+        memory: "500Mi"
+        intel.com/intel_sriov_700: '1'
+POD
+    popd
+}
+
+create_pod_yaml ${csar_id}
+kubectl delete pod $pod_name --ignore-not-found=true --now --wait
+kubectl create -f ${CSAR_DIR}/${csar_id}/$pod_name.yaml --validate=false
+
+status_phase=""
+while [[ $status_phase != "Running" ]]; do
+    new_phase=$(kubectl get pods $pod_name | awk 'NR==2{print $3}')
+    if [[ $new_phase != $status_phase ]]; then
+        echo "$(date +%H:%M:%S) - $pod_name : $new_phase"
+        status_phase=$new_phase
+    fi
+    if [[ $new_phase == "Running" ]]; then
+        echo "Pod is up and running.."
+    fi
+    if [[ $new_phase == "Err"* ]]; then
+        exit 1
+    fi
+done
+
+container_id=$(kubectl describe pod $pod_name | grep "Container ID" | awk '{print $3}' )
+container_id=${container_id#docker://}
+container_id=${container_id:0:12}
+
+apt-get install -y jq
+cpu_core=$(jq -r . /var/lib/kubelet/cpu_manager_state | grep ${container_id} | awk -F ':' '{print $2}' | awk -F '"' '{print $2}')
+numa_node_number=$(lscpu | grep "NUMA node(s)" | awk -F ':' '{print $2}')
+for (( node=0; node<$numa_node_number; node++ )); do
+    ranges=$(lscpu | grep "NUMA node"$node | awk -F ':' '{print $2}')
+    ranges=(${ranges//,/ })
+    for range in ${ranges[@]}; do
+        min=$(echo $range | awk -F '-' '{print $1}')
+        max=$(echo $range | awk -F '-' '{print $2}')
+        if [ $cpu_core -ge $min ] && [ $cpu_core -le $max ]; then
+            cpu_numa_node=$node
+        fi
+    done
+done
+
+vf_pci=$(kubectl exec $pod_name -- env | grep PCIDEVICE_INTEL_COM_INTEL_SRIOV_700 | awk -F '=' '{print $2}' | sed 's/\r//g')
+vf_numa_node=$(cat /sys/bus/pci/devices/$vf_pci/numa_node)
+
+echo "The allocated cpu core is:" $cpu_core
+echo "The numa node of the allocated cpu core is:" $cpu_numa_node
+echo "The PCI address of the allocated vf is:" $vf_pci
+echo "The numa node of the allocated vf is:" $vf_numa_node
+if [ "$cpu_numa_node" == "$vf_numa_node" ]; then
+    echo "The allocated cpu core and vf are on the same numa node"
+else
+    echo "The allocated cpu core and vf are on different numa nodes"
+fi
+
+kubectl delete pod $pod_name --now
+echo "Test complete."
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml
index f4332e7f..f296ca50 100755
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml
+++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml
@@ -1162,11 +1162,11 @@ prometheusOperator:
  ## Deploy CRDs used by Prometheus Operator.
  ##
-  createCustomResource: false
+  createCustomResource: true
  ## Attempt to clean up CRDs created by Prometheus Operator.
  ##
-  cleanupCustomResource: false
+  cleanupCustomResource: true
  ## Labels to add to the operator pod
  ##
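With createCustomResource set to true, the chart installs the Prometheus Operator CRDs itself; a quick post-deployment sanity check (illustrative):

    kubectl get crds | grep monitoring.coreos.com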
diff --git a/src/dcm/test/dcm_call_api.sh b/src/dcm/test/dcm_call_api.sh
index 966bc3d6..33fbf314 100755
--- a/src/dcm/test/dcm_call_api.sh
+++ b/src/dcm/test/dcm_call_api.sh
@@ -14,7 +14,6 @@
# * limitations under the License.
# */
-
dcm_addr="http://localhost:9077"
# parameters
@@ -22,6 +21,7 @@ project="test-project"
description="test-description"
logical_cloud_name="lc1"
namespace="ns1"
+api_groups=""
user="user-1"
permission="permission-1"
cluster_provider_name="cp-1"
@@ -52,7 +52,7 @@ logical_cloud_data="$(cat << EOF
"type" : "certificate",
"user-permissions" : [
{ "permission-name" : "${permission}",
- "apiGroups" : ["stable.example.com"],
+ "apiGroups" : ["${api_groups}"],
"resources" : ["secrets", "pods"],
"verbs" : ["get", "watch", "list", "create"]
}
@@ -99,8 +99,6 @@ cluster_2_data="$(cat << EOF
EOF
)"
-# removed all special chars from quota spec keys
-# due to loss of data when unmarshalling from json
quota_data="$(cat << EOF
{
"metadata" : {
@@ -108,54 +106,34 @@ quota_data="$(cat << EOF
"description": "${description}"
},
"spec" : {
- "persistentvolumeclaims" : "10",
- "pods": "500",
- "configmaps" : "10",
- "replicationcontrollers": "10",
- "resourcequotas" : "10",
- "services": "10",
- "secrets" : "10"
- }
-}
-EOF
-)"
-
-quota_data_original="$(cat << EOF
-{
-  "metadata" : {
-    "name" : "${quota_name}",
-    "description": "${description}"
-  },
-  "spec" : {
"limits.cpu": "400",
"limits.memory": "1000Gi",
"requests.cpu": "300",
"requests.memory": "900Gi",
"requests.storage" : "500Gi",
- "requests.ephemeral-storage": "",
- "limits.ephemeral-storage": "",
- "persistentvolumeclaims" : " ",
+ "requests.ephemeral-storage": "500",
+ "limits.ephemeral-storage": "500",
+ "persistentvolumeclaims" : "500",
"pods": "500",
- "configmaps" : "",
- "replicationcontrollers": "",
- "resourcequotas" : "",
- "services": "",
- "services.loadbalancers" : "",
- "services.nodeports" : "",
- "secrets" : "",
- "count/replicationcontrollers" : "",
- "count/deployments.apps" : "",
- "count/replicasets.apps" : "",
- "count/statefulsets.apps" : "",
- "count/jobs.batch" : "",
- "count/cronjobs.batch" : "",
- "count/deployments.extensions" : ""
+ "configmaps" : "1000",
+ "replicationcontrollers": "500",
+ "resourcequotas" : "500",
+ "services": "500",
+ "services.loadbalancers" : "500",
+ "services.nodeports" : "500",
+ "secrets" : "500",
+ "count/replicationcontrollers" : "500",
+ "count/deployments.apps" : "500",
+ "count/replicasets.apps" : "500",
+ "count/statefulsets.apps" : "500",
+ "count/jobs.batch" : "500",
+ "count/cronjobs.batch" : "500",
+ "count/deployments.extensions" : "500"
}
}
EOF
)"
-
# Create logical cloud
printf "\n\nCreating logical cloud data\n\n"
curl -d "${logical_cloud_data}" -X POST ${logical_cloud_url}
@@ -169,7 +147,6 @@ curl -d "${cluster_2_data}" -X POST ${cluster_url}
printf "\n\nAdding resource quota for the logical cloud\n\n"
curl -d "${quota_data}" -X POST ${quota_url}
-
# Get logical cloud data
printf "\n\nGetting logical cloud\n\n"
curl -X GET "${logical_cloud_url}/${logical_cloud_name}"
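The remaining objects can be spot-checked the same way; for example, reading back the quota created above (quota_url and quota_name are defined earlier in this script):

    printf "\n\nGetting resource quota\n\n"
    curl -X GET "${quota_url}/${quota_name}"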