author     Liang Ding <liang.ding@intel.com>  2019-10-30 23:47:35 -0700
committer  Liang Ding <liang.ding@intel.com>  2020-05-05 15:41:48 +0000
commit     cc10b9aff3fd16df62c0655ec1626624ad2e2fc0 (patch)
tree       506a942430b6eca7058b5549e445125b98f1e5ab
parent     501d62d194a42c5c85f22d8208a4e1dd6d985492 (diff)
add cmk in KuD
- deploy cmk related pods
- untaint compute nodes if necessary
- run cmk unit tests: allocate CPUs from exclusive and shared pools
- deploy a testing nginx pod along with cmk testing pods
- preset 1/2 CPUs for the shared/exclusive pools respectively to fit CI
  server machines; users can adjust these parameters to meet their own
  requirements (see the sketch after this list)
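For example, the pool sizes can be adjusted before installation (a sketch,
run from a checkout of this repo; the variable names come from kud-vars.yml
in this change):

    cd kud/deployment_infra/playbooks
    # grow the shared pool to 2 cores and the exclusive pool to 4 cores
    sed -i 's/^cmk_shared_num_cores:.*/cmk_shared_num_cores: 2/' kud-vars.yml
    sed -i 's/^cmk_exclusive_num_cores:.*/cmk_exclusive_num_cores: 4/' kud-vars.yml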
Test Results:
- many rounds of vagrant-based tests with 5 VMs (controller01/02/03 and
  compute01/02) all pass
- 14 rounds of tests on my local server (S2600WFQ, 36C/72T) and
  PC (HP Z228, 4C/4T) with all-in-one bare metal deployment all pass
- CI results (on a 4C/4T machine) for the latest patch set also show
  that the bare metal deployment test passes
- NOTE: both my local tests and CI use the same testing method: calling
  aio.sh after applying the latest patch set.
Change-Id: I046a4a63b94f92f23347ab76c21a661521e01119
Issue-ID: MULTICLOUD-879
Signed-off-by: Liang Ding <liang.ding@intel.com>
-rw-r--r--  kud/deployment_infra/images/cmk.yaml                       | 294
-rw-r--r--  kud/deployment_infra/playbooks/configure-cmk.yml           | 107
-rw-r--r--  kud/deployment_infra/playbooks/kud-vars.yml                |  16
-rw-r--r--  kud/deployment_infra/playbooks/preconfigure-cmk.yml        |  62
-rw-r--r--  kud/hosting_providers/baremetal/README.md                  |   4
-rwxr-xr-x  kud/hosting_providers/baremetal/aio.sh                     |   3
-rwxr-xr-x  kud/hosting_providers/containerized/installer.sh           |   4
-rw-r--r--  kud/hosting_providers/vagrant/Vagrantfile                  |  24
-rw-r--r--  kud/hosting_providers/vagrant/config/default.yml           |   6
-rw-r--r--  kud/hosting_providers/vagrant/config/samples/pdf.yml.aio   |   1
-rw-r--r--  kud/hosting_providers/vagrant/config/samples/pdf.yml.mini  |   2
-rwxr-xr-x  kud/hosting_providers/vagrant/installer.sh                 |   4
-rwxr-xr-x  kud/tests/cmk.sh                                           | 231
13 files changed, 747 insertions(+), 11 deletions(-)
diff --git a/kud/deployment_infra/images/cmk.yaml b/kud/deployment_infra/images/cmk.yaml
new file mode 100644
index 00000000..4c048a42
--- /dev/null
+++ b/kud/deployment_infra/images/cmk.yaml
@@ -0,0 +1,294 @@
+---
+# Source: cmk/templates/serviceaccount.yml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cmk
+  namespace: kube-system
+---
+# Source: cmk/templates/rbac.yml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: cmk-custom-resource-definition-controller
+  namespace: kube-system
+rules:
+- apiGroups: ["intel.com"]
+  resources: ["*"]
+  verbs: ["*"]
+- apiGroups: ["apiextensions.k8s.io"]
+  resources: ["customresourcedefinitions", "customresourcedefinitions.extensions"]
+  verbs: ["*"]
+---
+# Source: cmk/templates/rbac.yml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: cmk-daemonset-controller
+  namespace: kube-system
+rules:
+- apiGroups: ["extensions"]
+  resources: ["daemonsets", "daemonsets.extensions"]
+  verbs: ["*"]
+---
+# Source: cmk/templates/rbac.yml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: cmk-third-party-resource-controller
+  namespace: kube-system
+rules:
+- apiGroups: ["cmk.intel.com"]
+  resources: ["*"]
+  verbs: ["*"]
+- apiGroups: ["extensions"]
+  resources: ["thirdpartyresources", "thirdpartyresources.extensions"]
+  verbs: ["*"]
+---
+# Source: cmk/templates/rbac.yml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: cmk-version-controller
+  namespace: kube-system
+rules:
+  - nonResourceURLs: ["*"]
+    verbs:
+      - get
+---
+# Source: cmk/templates/rbac.yml
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: cmk-role-binding-version
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cmk-version-controller
+subjects:
+- kind: ServiceAccount
+  name: cmk
+  namespace: kube-system
+---
+# Source: cmk/templates/rbac.yml
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: cmk-role-binding-daemonset
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cmk-daemonset-controller
+subjects:
+- kind: ServiceAccount
+  name: cmk
+  namespace: kube-system
+---
+# Source: cmk/templates/rbac.yml
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: cmk-role-binding-node
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:node
+subjects:
+- kind: ServiceAccount
+  name: cmk
+  namespace: kube-system
+---
+# Source: cmk/templates/rbac.yml
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: cmk-role-binding-tpr
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cmk-third-party-resource-controller
+subjects:
+- kind: ServiceAccount
+  name: cmk
+  namespace: kube-system
+---
+# Source: cmk/templates/rbac.yml
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: cmk-role-binding-crd
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cmk-custom-resource-definition-controller
+subjects:
+- kind: ServiceAccount
+  name: cmk
+  namespace: kube-system
+---
+# Source: cmk/templates/daemonset.yml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: cmk
+  labels:
+    app: cmk
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      name: cmk
+  template:
+    metadata:
+      labels:
+        name: cmk
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+      serviceAccountName: cmk
+      tolerations:
+      - key: cmk
+        operator: Exists
+      containers:
+      - name: reconcile
+        image: localhost:5000/cmk:v1.4.1
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: CMK_RECONCILE_SLEEP_TIME
+          value: '60'
+        - name: CMK_PROC_FS
+          value: /proc
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        command: ["/bin/bash", "-c"]
+        args:
+        - "/cmk/cmk.py isolate --pool=infra /cmk/cmk.py -- reconcile --interval=$CMK_RECONCILE_SLEEP_TIME --publish"
+        volumeMounts:
+        - mountPath: /proc
+          name: host-proc
+          readOnly: false
+        - mountPath: /etc/cmk
+          name: cmk-conf-dir
+        - mountPath: /opt/bin
+          name: cmk-install-dir
+      - name: nodereport
+        image: localhost:5000/cmk:v1.4.1
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: CMK_NODE_REPORT_SLEEP_TIME
+          value: '60'
+        - name: CMK_PROC_FS
+          value: /proc
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        command: ["/bin/bash", "-c"]
+        args:
+        - "/cmk/cmk.py isolate --pool=infra /cmk/cmk.py -- node-report --interval=$CMK_NODE_REPORT_SLEEP_TIME --publish"
+        volumeMounts:
+        - mountPath: /proc
+          name: host-proc
+          readOnly: false
+        - mountPath: /etc/cmk
+          name: cmk-conf-dir
+        - mountPath: /opt/bin
+          name: cmk-install-dir
+      initContainers:
+      - name: init
+        image: localhost:5000/cmk:v1.4.1
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: CMK_PROC_FS
+          value: "/proc"
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        command: ["/bin/bash", "-c"]
+        args:
+        - "/cmk/cmk.py init --conf-dir=/etc/cmk --exclusive-mode=packed --num-exclusive-cores=0 --shared-mode=packed --num-shared-cores=0"
+        volumeMounts:
+        - mountPath: /proc
+          name: host-proc
+          readOnly: false
+        - mountPath: /etc/cmk
+          name: cmk-conf-dir
+          readOnly: false
+        - mountPath: /opt/bin
+          name: cmk-install-dir
+          readOnly: false
+      - name: discover
+        image: localhost:5000/cmk:v1.4.1
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: CMK_PROC_FS
+          value: /proc
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        command: ["/bin/bash", "-c"]
+        args:
+#        - "echo -en '\n'; ls -a /etc/cmk; sleep 10;"
+        - "until [ -d /etc/cmk ]; do sleep 1; done; /cmk/cmk.py discover"
+        volumeMounts:
+        - mountPath: /proc
+          name: host-proc
+          readOnly: false
+        - mountPath: /etc/cmk
+          name: cmk-conf-dir
+          readOnly: false
+        - mountPath: /opt/bin
+          name: cmk-install-dir
+          readOnly: false
+      - name: install
+        image: localhost:5000/cmk:v1.4.1
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: CMK_PROC_FS
+          value: /proc
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        command: ["/bin/bash", "-c"]
+        args:
+        - "/cmk/cmk.py install"
+        volumeMounts:
+        - mountPath: /proc
+          name: host-proc
+          readOnly: false
+        - mountPath: /etc/cmk
+          name: cmk-conf-dir
+        - mountPath: /opt/bin
+          name: cmk-install-dir
+#      restartPolicy: Never
+      volumes:
+      - hostPath:
+          path: /proc
+#          type: ""
+        name: host-proc
+      - hostPath:
+          path: /etc/cmk
+#          type: ""
+        name: cmk-conf-dir
+      - hostPath:
+          path: /opt/bin
+#          type: ""
+        name: cmk-install-dir
+
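Once the manifest above is applied, the DaemonSet and the per-node reports it
publishes can be sanity-checked with something like the following (a sketch;
assumes kubectl is configured for the target cluster):

    # each eligible node should run one cmk pod in the Running phase
    kubectl get pods -n kube-system -l name=cmk
    # node reports are published by the nodereport container
    kubectl get cmk-nodereport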
diff --git a/kud/deployment_infra/playbooks/configure-cmk.yml b/kud/deployment_infra/playbooks/configure-cmk.yml
new file mode 100644
index 00000000..cd2fb50e
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-cmk.yml
@@ -0,0 +1,107 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- import_playbook: preconfigure-cmk.yml
+- hosts: localhost
+  pre_tasks:
+    - name: Load kud variables
+      include_vars:
+        file: kud-vars.yml
+  vars:
+    cmk_install_host: '{{ inventory_hostname }}'
+
+  tasks:
+    - name: build list of CMK nodes to untaint
+      set_fact:
+        cmk_hosts_list: "{{ groups['kube-node'] }}"
+
+    - name: generate CMK install yaml file
+      command: "cp {{ playbook_dir }}/../images/cmk.yaml {{ playbook_dir }}/../images/cmk-{{ cmk_install_host }}.yaml"
+
+    - name: customize CMK install yaml file per runtime env
+      lineinfile:
+        dest: "{{ playbook_dir }}/../images/cmk-{{ cmk_install_host }}.yaml"
+        insertafter: "nodeSelectorTerms:"
+        line: "            - matchFields:\n              - key: metadata.name\n                operator: In\n                values:\n                - '{{ item }}'"
+      register: cus_result
+      with_items:
+        - "{{ cmk_hosts_list }}"
+
+    - name: prepare CMK CPU cores per config file
+      replace:
+        dest: "{{ playbook_dir }}/../images/cmk-{{ cmk_install_host }}.yaml"
+        regexp: '{{ item.pool }}=0'
+        replace: '{{ item.pool }}={{ item.cores }}'
+      with_items:
+        - { pool: 'num-shared-cores', cores: '{{ cmk_shared_num_cores }}' }
+        - { pool: 'num-exclusive-cores', cores: '{{ cmk_exclusive_num_cores }}' }
+
+    - name: install CMK components
+      command: "/usr/local/bin/kubectl create -f {{ playbook_dir }}/../images/cmk-{{ cmk_install_host }}.yaml"
+
+    - name: wait for all cmk daemonset pods to be running
+      shell: kubectl get pods -n {{ cmk_namespace }} -l name=cmk -o jsonpath={.items[*].status.phase}
+      register: kubectl_cmk_running
+      until: "['Running'] == kubectl_cmk_running.stdout.split() | unique"
+      retries: 50
+      delay: 5
+      failed_when: false
+
+    - name: create a script to check CMK setup
+      copy:
+        dest: "./cmk-check.sh"
+        content: |
+          #!/bin/bash
+          echo
+          echo "waiting for cmk-nodereport effective"
+          status=0
+          while [ $status -ne 1 ]; do
+              status=$(kubectl get cmk-nodereport | grep ENV | wc -l)
+              sleep 1
+              echo not found
+          done
+          echo "cmk-nodereport is effective"
+
+    - name: judge the runtime environment
+      set_fact:
+        cmk_runtime_env: "{{ groups['kube-node'][0] }}"
+    - debug:
+        var: cmk_runtime_env
+
+    - name: prepare cmk check file
+      replace:
+        dest: "./cmk-check.sh"
+        regexp: 'ENV'
+        replace: '{{ cmk_runtime_env }}'
+
+    - name: Changing perm of "sh", adding "+x"
+      shell: "chmod +x cmk-check.sh"
+      args:
+        warn: false
+
+    - name: Run the script and re-evaluate the variable.
+      command: "./cmk-check.sh"
+
+    - name: Clean the script and folder.
+      file:
+        path: ./cmk-check.sh
+        state: absent
+
+    - name: untaint nodes
+      command: kubectl taint node "{{ item }}" cmk-
+      failed_when: false
+      register: untaint_result
+      changed_when: "untaint_result.rc == 0"
+      when:
+        - cmk_untaint_required
+      with_items:
+        - "{{ cmk_hosts_list }}"
+    - debug:
+        var: untaint_result
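The untaint step at the end of this playbook is equivalent to running the
following by hand for each compute node (node name illustrative):

    # drop the 'cmk' taint so ordinary workloads can be scheduled again
    kubectl taint node compute01 cmk-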
diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
index 0fdfafeb..77bf9224 100644
--- a/kud/deployment_infra/playbooks/kud-vars.yml
+++ b/kud/deployment_infra/playbooks/kud-vars.yml
@@ -40,6 +40,22 @@ istio_source_type: "tarball"
 istio_version: 1.0.3
 istio_url: "https://github.com/istio/istio/releases/download/{{ istio_version }}/istio-{{ istio_version }}-linux.tar.gz"
 
+# Intel CPU Manager for Kubernetes
+cmk_enabled: true
+cmk_namespace: kube-system
+cmk_use_all_hosts: false # 'true' will deploy CMK on the master nodes too
+cmk_untaint_nodes: [compute01, compute02]
+cmk_shared_num_cores: 1 # number of CPU cores to be assigned to the "shared" pool on each of the nodes
+cmk_exclusive_num_cores: 2 # number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
+cmk_git_url: "https://github.com/intel/CPU-Manager-for-Kubernetes.git"
+cmk_version: "v1.4.1"
+cmk_dir: "/tmp/cmk"
+registry_local_address: "localhost:5000"
+cmk_pkgs: make,jq
+cmk_untaint_required: true
+#cmk_shared_mode: packed # choose between: packed, spread, default: packed
+#cmk_exclusive_mode: packed # choose between: packed, spread, default: packed
+
 go_version: '1.12.5'
 kubespray_version: 2.10.4
 helm_client_version: 2.13.1
diff --git a/kud/deployment_infra/playbooks/preconfigure-cmk.yml b/kud/deployment_infra/playbooks/preconfigure-cmk.yml
new file mode 100644
index 00000000..7aab4e2e
--- /dev/null
+++ b/kud/deployment_infra/playbooks/preconfigure-cmk.yml
@@ -0,0 +1,62 @@
+---
+- hosts: kube-node
+  become: yes
+  pre_tasks:
+    - name: Load kud variables
+      include_vars:
+        file: kud-vars.yml
+  tasks:
+    - name: install cmk required packges
+      package:
+        name: "{{ item }}"
+        state: present
+      with_items: "{{ cmk_pkgs }}"
+
+    - name: clean CMK directory
+      file:
+        path: "{{ cmk_dir }}"
+        state: absent
+
+    - name: create CMK directory
+      file:
+        path: "{{ cmk_dir }}"
+        state: directory
+
+    - name: clone CMK repository
+      command: git clone {{ cmk_git_url }} -b {{ cmk_version }}
+      args:
+        chdir: "{{ cmk_dir }}"
+
+    - name: read current CMK version
+      command: echo v1.4.1
+      args:
+        chdir: "{{ cmk_dir }}"
+      register: cmk_img_version
+
+    - name: build CMK image
+      command: make
+      args:
+        chdir: "{{ cmk_dir }}/CPU-Manager-for-Kubernetes"
+
+    - name: tag CMK image
+      command: docker tag cmk:{{ cmk_img_version.stdout }} {{ registry_local_address }}/cmk:{{ cmk_img_version.stdout }}
+
+    - name: build list of CMK hosts
+      set_fact:
+        cmk_hosts_list: "{{ groups['kube-node'] | join(',') }}"
+      when:
+        - cmk_use_all_hosts != true
+        - (cmk_hosts_list is undefined) or (cmk_hosts_list | length == 0)
+
+- hosts: kube-master[0]
+  become: yes
+  pre_tasks:
+    - name: Load kud variables
+      include_vars:
+        file: kud-vars.yml
+  tasks:
+    - name: install cmk required packges
+      package:
+        name: "{{ item }}"
+        state: present
+      with_items: "{{ cmk_pkgs }}"
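Roughly, the clone/build/tag tasks above correspond to this shell sequence on
each node (a sketch using the defaults from kud-vars.yml):

    git -C /tmp/cmk clone https://github.com/intel/CPU-Manager-for-Kubernetes.git -b v1.4.1
    make -C /tmp/cmk/CPU-Manager-for-Kubernetes
    docker tag cmk:v1.4.1 localhost:5000/cmk:v1.4.1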
diff --git a/kud/hosting_providers/baremetal/README.md b/kud/hosting_providers/baremetal/README.md
index 4f81d7b5..5e1edf79 100644
--- a/kud/hosting_providers/baremetal/README.md
+++ b/kud/hosting_providers/baremetal/README.md
@@ -15,6 +15,10 @@ ansible playbooks allow to provision a deployment on Baremetal.
 The [installer](installer.sh) bash script contains the minimal
 Ubuntu instructions required for running this project.
 
+NOTE: for cmk bare metal deployment, preset 1/2 CPUs for
+      shared/exlusive pools respectively to fit CI server machines
+      users can adjust the parameters to meet their own requirements.
+
 ## License
 
 Apache-2.0
diff --git a/kud/hosting_providers/baremetal/aio.sh b/kud/hosting_providers/baremetal/aio.sh
index c9fac098..6a304141 100755
--- a/kud/hosting_providers/baremetal/aio.sh
+++ b/kud/hosting_providers/baremetal/aio.sh
@@ -41,6 +41,9 @@ localhost
 [virtlet]
 localhost
 
+[cmk]
+localhost
+
 [k8s-cluster:children]
 kube-node
 kube-master
diff --git a/kud/hosting_providers/containerized/installer.sh b/kud/hosting_providers/containerized/installer.sh
index 8739ca23..afea0b5a 100755
--- a/kud/hosting_providers/containerized/installer.sh
+++ b/kud/hosting_providers/containerized/installer.sh
@@ -119,7 +119,7 @@ function install_addons {
     ansible-playbook $verbose -i \
         $kud_inventory $kud_playbooks/configure-kud.yml | \
         tee $cluster_log/setup-kud.log
-    for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov $plugins_name}; do
+    for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov cmk $plugins_name}; do
         echo "Deploying $addon using configure-$addon.yml playbook.."
         ansible-playbook $verbose -i \
             $kud_inventory $kud_playbooks/configure-${addon}.yml | \
@@ -128,7 +128,7 @@ function install_addons {
 
     echo "Run the test cases if testing_enabled is set to true."
     if [[ "${testing_enabled}" == "true" ]]; then
-        for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov $plugins_name}; do
+        for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov cmk $plugins_name}; do
             pushd $kud_tests
             bash ${addon}.sh
             popd
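Since the addon list is expanded with ${KUD_ADDONS:-...}, the default can be
overridden from the environment, e.g. to deploy and test only CMK (a sketch;
assumes the installer is invoked directly):

    KUD_ADDONS=cmk ./installer.sh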
"10.10.10.2", :type => :static @@ -126,6 +139,7 @@ Vagrant.configure("2") do |config| cp /home/vagrant/multicloud-k8s/kud/hosting_providers/vagrant/insecure_keys/key /home/vagrant/.ssh/id_rsa chown vagrant /home/vagrant/.ssh/id_rsa chmod 400 /home/vagrant/.ssh/id_rsa + sudo apt install jq -y cd /home/vagrant/multicloud-k8s/kud/hosting_providers/vagrant/ && ./installer.sh | tee kud_installer.log SHELL end diff --git a/kud/hosting_providers/vagrant/config/default.yml b/kud/hosting_providers/vagrant/config/default.yml index 094c3594..242998c4 100644 --- a/kud/hosting_providers/vagrant/config/default.yml +++ b/kud/hosting_providers/vagrant/config/default.yml @@ -44,10 +44,12 @@ - kube-node - ovn-controller - virtlet + - cmk - name: "compute02" ip: "10.10.10.7" - memory: 8192 - cpus: 4 + memory: 32768 + cpus: 16 roles: - kube-node - ovn-controller + - cmk diff --git a/kud/hosting_providers/vagrant/config/samples/pdf.yml.aio b/kud/hosting_providers/vagrant/config/samples/pdf.yml.aio index a87f967e..cd95776f 100644 --- a/kud/hosting_providers/vagrant/config/samples/pdf.yml.aio +++ b/kud/hosting_providers/vagrant/config/samples/pdf.yml.aio @@ -23,3 +23,4 @@ - kube-node - ovn-controller - virtlet + - cmk diff --git a/kud/hosting_providers/vagrant/config/samples/pdf.yml.mini b/kud/hosting_providers/vagrant/config/samples/pdf.yml.mini index d53a4537..258d7799 100644 --- a/kud/hosting_providers/vagrant/config/samples/pdf.yml.mini +++ b/kud/hosting_providers/vagrant/config/samples/pdf.yml.mini @@ -24,6 +24,7 @@ - kube-node - ovn-controller - virtlet + - cmk - name: "minion02" ip: "10.10.10.5" memory: 65536 @@ -31,3 +32,4 @@ roles: - kube-node - ovn-controller + - cmk diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh index 546d4058..859b49ce 100755 --- a/kud/hosting_providers/vagrant/installer.sh +++ b/kud/hosting_providers/vagrant/installer.sh @@ -155,13 +155,13 @@ function install_addons { _install_ansible sudo ansible-galaxy install $verbose -r $kud_infra_folder/galaxy-requirements.yml --ignore-errors ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml | sudo tee $log_folder/setup-kud.log - for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat}; do + for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk}; do echo "Deploying $addon using configure-$addon.yml playbook.." ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-${addon}.yml | sudo tee $log_folder/setup-${addon}.log done echo "Run the test cases if testing_enabled is set to true." 
if [[ "${testing_enabled}" == "true" ]]; then - for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat}; do + for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk}; do pushd $kud_tests bash ${addon}.sh popd diff --git a/kud/tests/cmk.sh b/kud/tests/cmk.sh new file mode 100755 index 00000000..1a14b5b8 --- /dev/null +++ b/kud/tests/cmk.sh @@ -0,0 +1,231 @@ +#!/bin/bash +ENV=$(kubectl get nodes --all-namespaces | wc -l) +if [[ $ENV -gt 2 ]]; then + COMPUTE_NODE=$(kubectl get nodes --all-namespaces | grep -v master | awk 'NR==2{print $1}') +else + COMPUTE_NODE=$(kubectl get nodes --all-namespaces | grep master | awk 'NR==1{print $1}') +fi +cases=("exclusive ${COMPUTE_NODE} 1" "shared ${COMPUTE_NODE} -1") +case=(null null 0) +num=${#cases[*]} +POOL=0 +NODE=1 +CORE=2 +DIR=/tmp +pod_name=cmk-test-pod + +function wait_for_pod_up { + status_phase="" + while [[ $status_phase != "Running" ]]; do + new_phase=$(kubectl get pods "$@" | awk 'NR==2{print $3}') + if [[ $new_phase != $status_phase ]]; then + echo "$(date +%H:%M:%S) - $@ : $new_phase" + status_phase=$new_phase + fi + if [[ $new_phase == "Running" ]]; then + echo "Pod $@ is up and running.." + fi + if [[ $new_phase == "Err"* ]]; then + exit 1 + fi + done +} + + +function start_nginx_pod { + kubectl delete deployment -n default nginx --ignore-not-found=true + kubectl create deployment nginx --image=nginx + sleep 2 + nginx_pod=$(kubectl get pods --all-namespaces| grep nginx | awk 'NR==1{print $2}') + wait_for_pod_up $nginx_pod + kubectl delete deployment -n default nginx --ignore-not-found=true + pod_status="Running" + until [[ $pod_status == "" ]]; do + pod_status=$(kubectl get pod $nginx_pod --ignore-not-found=true | awk 'NR==2{print $3}') + done +} + +rm -f $DIR/$pod_name.yaml +kubectl delete pod $pod_name --ignore-not-found=true --now --wait +echo +echo "env is $ENV" +echo +for ((i=0;i<$num;i++)); do + inner_case=(${cases[$i]}) + num_inner=${#inner_case[*]} + for ((j=0;j<$num_inner;j++)); do + case[$j]=${inner_case[$j]} + done + echo "##################################" + if [ "${case[$POOL]}" == "exclusive" ]; then + echo "TC: to allocate ${case[$CORE]} CPU(s) from pool of ${case[$POOL]} on node of ${case[$NODE]}" + TOTAL=$(kubectl get cmk-nodereport ${case[$NODE]} -o json | jq .spec.report.description.pools.${case[$POOL]} | jq .cpuLists | awk -F '{' '{print $(NF)}' | awk -F '}' '{print $(NF)}' | awk -F ',' '{print $(NF)}' | grep "\"tasks\": \[" | wc -l) + echo "ready to generate yaml" +cat << EOF > $DIR/$pod_name.yaml + apiVersion: v1 + kind: Pod + metadata: + labels: + app: cmk-test-pod + name: cmk-test-pod + spec: + nodeName: ${case[$NODE]} + containers: + - args: + - "/opt/bin/cmk isolate --conf-dir=/etc/cmk --pool=exclusive sleep -- 3900" + command: + - "sh" + - "-c" + env: + - name: CMK_PROC_FS + value: "/host/proc" + - name: CMK_NUM_CORES + value: "${case[$CORE]}" + image: ubuntu:18.04 + imagePullPolicy: "IfNotPresent" + name: cmk-test + volumeMounts: + - mountPath: "/host/proc" + name: host-proc + - mountPath: "/opt/bin" + name: cmk-install-dir + - mountPath: "/etc/cmk" + name: cmk-conf-dir + restartPolicy: Never + volumes: + - hostPath: + path: "/opt/bin" + name: cmk-install-dir + - hostPath: + path: "/proc" + name: host-proc + - hostPath: + path: "/etc/cmk" + name: cmk-conf-dir +EOF + + echo "ready to create pod" + kubectl create -f $DIR/$pod_name.yaml --validate=false + sleep 2 + echo "waiting for pod up" + for pod in $pod_name; do + wait_for_pod_up $pod + done + echo "waiting for CPU allocation finished ..." 
diff --git a/kud/tests/cmk.sh b/kud/tests/cmk.sh
new file mode 100755
index 00000000..1a14b5b8
--- /dev/null
+++ b/kud/tests/cmk.sh
@@ -0,0 +1,231 @@
+#!/bin/bash
+ENV=$(kubectl get nodes --all-namespaces | wc -l)
+if [[ $ENV -gt 2 ]]; then
+    COMPUTE_NODE=$(kubectl get nodes --all-namespaces | grep -v master | awk 'NR==2{print $1}')
+else
+    COMPUTE_NODE=$(kubectl get nodes --all-namespaces | grep master | awk 'NR==1{print $1}')
+fi
+cases=("exclusive ${COMPUTE_NODE} 1" "shared ${COMPUTE_NODE} -1")
+case=(null null 0)
+num=${#cases[*]}
+POOL=0
+NODE=1
+CORE=2
+DIR=/tmp
+pod_name=cmk-test-pod
+
+function wait_for_pod_up {
+    status_phase=""
+    while [[ $status_phase != "Running" ]]; do
+        new_phase=$(kubectl get pods "$@" | awk 'NR==2{print $3}')
+        if [[ $new_phase != $status_phase ]]; then
+            echo "$(date +%H:%M:%S) - $@ : $new_phase"
+            status_phase=$new_phase
+        fi
+        if [[ $new_phase == "Running" ]]; then
+            echo "Pod $@ is up and running.."
+        fi
+        if [[ $new_phase == "Err"* ]]; then
+            exit 1
+        fi
+    done
+}
+
+
+function start_nginx_pod {
+    kubectl delete deployment -n default nginx --ignore-not-found=true
+    kubectl create deployment nginx --image=nginx
+    sleep 2
+    nginx_pod=$(kubectl get pods --all-namespaces | grep nginx | awk 'NR==1{print $2}')
+    wait_for_pod_up $nginx_pod
+    kubectl delete deployment -n default nginx --ignore-not-found=true
+    pod_status="Running"
+    until [[ $pod_status == "" ]]; do
+        pod_status=$(kubectl get pod $nginx_pod --ignore-not-found=true | awk 'NR==2{print $3}')
+    done
+}
+
+rm -f $DIR/$pod_name.yaml
+kubectl delete pod $pod_name --ignore-not-found=true --now --wait
+echo
+echo "env is $ENV"
+echo
+for ((i=0;i<$num;i++)); do
+    inner_case=(${cases[$i]})
+    num_inner=${#inner_case[*]}
+    for ((j=0;j<$num_inner;j++)); do
+        case[$j]=${inner_case[$j]}
+    done
+    echo "##################################"
+    if [ "${case[$POOL]}" == "exclusive" ]; then
+        echo "TC: to allocate ${case[$CORE]} CPU(s) from pool of ${case[$POOL]} on node of ${case[$NODE]}"
+        TOTAL=$(kubectl get cmk-nodereport ${case[$NODE]} -o json | jq .spec.report.description.pools.${case[$POOL]} | jq .cpuLists | awk -F '{' '{print $(NF)}' | awk -F '}' '{print $(NF)}' | awk -F ',' '{print $(NF)}' | grep "\"tasks\": \[" | wc -l)
+        echo "ready to generate yaml"
+cat << EOF > $DIR/$pod_name.yaml
+  apiVersion: v1
+  kind: Pod
+  metadata:
+    labels:
+      app: cmk-test-pod
+    name: cmk-test-pod
+  spec:
+    nodeName: ${case[$NODE]}
+    containers:
+    - args:
+      - "/opt/bin/cmk isolate --conf-dir=/etc/cmk --pool=exclusive sleep -- 3900"
+      command:
+      - "sh"
+      - "-c"
+      env:
+      - name: CMK_PROC_FS
+        value: "/host/proc"
+      - name: CMK_NUM_CORES
+        value: "${case[$CORE]}"
+      image: ubuntu:18.04
+      imagePullPolicy: "IfNotPresent"
+      name: cmk-test
+      volumeMounts:
+      - mountPath: "/host/proc"
+        name: host-proc
+      - mountPath: "/opt/bin"
+        name: cmk-install-dir
+      - mountPath: "/etc/cmk"
+        name: cmk-conf-dir
+    restartPolicy: Never
+    volumes:
+    - hostPath:
+        path: "/opt/bin"
+      name: cmk-install-dir
+    - hostPath:
+        path: "/proc"
+      name: host-proc
+    - hostPath:
+        path: "/etc/cmk"
+      name: cmk-conf-dir
+EOF
+
+        echo "ready to create pod"
+        kubectl create -f $DIR/$pod_name.yaml --validate=false
+        sleep 2
+        echo "waiting for pod up"
+        for pod in $pod_name; do
+            wait_for_pod_up $pod
+        done
+        echo "waiting for CPU allocation finished ..."
+        rest=$TOTAL
+        until [[ $TOTAL -gt $rest ]]; do
+            rest=$(kubectl get cmk-nodereport ${case[$NODE]} -o json | jq .spec.report.description.pools.exclusive | jq .cpuLists | awk -F '{' '{print $(NF)}' | awk -F '}' '{print $(NF)}' | awk -F ',' '{print $(NF)}' | grep "\"tasks\": \[\]" | wc -l)
+        done
+        let allocated=`expr $TOTAL - $rest`
+        echo "The allocated CPU amount is:" $allocated
+        echo "deploy a nginx pod"
+        start_nginx_pod
+        if [[ $allocated == ${case[$CORE]} ]]; then
+            echo "CPU was allocated as expected, TC passed !!"
+        else
+            echo "failed to allocate CPU, TC failed !!"
+        fi
+        rm -f $DIR/$pod_name.yaml
+        echo "ready to delete pod"
+        kubectl delete pod $pod_name --ignore-not-found=true --now --wait
+        echo "Pod was deleted"
+        echo "##################################"
+        echo
+        echo
+    else
+        echo "TC: to allocate CPU(s) from pool of ${case[$POOL]} on node of ${case[$NODE]}"
+        echo "ready to generate yaml"
+cat << EOF > $DIR/$pod_name.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    app: cmk-test-pod
+  name: cmk-test-pod
+spec:
+  nodeName: ${case[$NODE]}
+  containers:
+  - name: share1
+    args:
+    - "/opt/bin/cmk isolate --conf-dir=/etc/cmk --pool=shared sleep -- 3900"
+    command:
+    - "sh"
+    - "-c"
+    env:
+    - name: CMK_PROC_FS
+      value: "/host/proc"
+    - name: CMK_NUM_CORES
+      value: "3"
+    image: ubuntu:18.10
+    imagePullPolicy: "IfNotPresent"
+    volumeMounts:
+    - mountPath: "/host/proc"
+      name: host-proc
+    - mountPath: "/opt/bin"
+      name: cmk-install-dir
+    - mountPath: "/etc/cmk"
+      name: cmk-conf-dir
+  - name: share2
+    args:
+    - "/opt/bin/cmk isolate --conf-dir=/etc/cmk --pool=shared sleep -- 3300"
+    command:
+    - "sh"
+    - "-c"
+    env:
+    - name: CMK_PROC_FS
+      value: "/host/proc"
+    - name: CMK_NUM_CORES
+      value: "3"
+    image: ubuntu:18.10
+    imagePullPolicy: "IfNotPresent"
+    volumeMounts:
+    - mountPath: "/host/proc"
+      name: host-proc
+    - mountPath: "/opt/bin"
+      name: cmk-install-dir
+    - mountPath: "/etc/cmk"
+      name: cmk-conf-dir
+  volumes:
+  - hostPath:
+      path: "/opt/bin"
+    name: cmk-install-dir
+  - hostPath:
+      path: "/proc"
+    name: host-proc
+  - hostPath:
+      path: "/etc/cmk"
+    name: cmk-conf-dir
+EOF
+
+        echo "ready to create pod"
+        kubectl create -f $DIR/$pod_name.yaml --validate=false
+        sleep 2
+        echo "waiting for pod up"
+        for pod in $pod_name; do
+            wait_for_pod_up $pod
+        done
+        echo "waiting for CPU allocation finished ..."
+        rest=0
+        timeout=0
+        until [ $rest == 2 -o $timeout == 180 ]; do
+            rest=$(kubectl get cmk-nodereport ${case[$NODE]} -o json | jq .spec.report.description.pools.shared | jq .cpuLists | awk -F '{' '{print $(NF)}' | awk -F '}' '{print $(NF)}' | grep -v "cpus" | grep " " | grep -v "tasks" | grep -v "\]" | wc -l)
+            sleep -- 1
+            let timeout++
+        done
+        echo "The CPU allocated in shared pool for 2 tasks"
+        echo "deploy a nginx pod"
+        start_nginx_pod
+        if [[ $rest == 2 ]]; then
+            echo "CPU was allocated as expected, TC passed !!"
+        else
+            echo "failed to allocate CPU, TC failed !!"
+        fi
+        rm -f $DIR/$pod_name.yaml
+        echo "ready to delete pod"
+        kubectl delete pod $pod_name --ignore-not-found=true --now --wait
+        echo "Pod was deleted"
+        echo "##################################"
+        echo
+        echo
+    fi
+done
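The pool state that the test polls can also be inspected directly, using the
same jq path the script uses (node name illustrative):

    kubectl get cmk-nodereport compute01 -o json | jq '.spec.report.description.pools.exclusive'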