Diffstat (limited to 'kud')
-rw-r--r-- | kud/deployment_infra/images/nfd-master.yaml | 17
-rw-r--r-- | kud/deployment_infra/playbooks/configure-emco.yml | 10
-rw-r--r-- | kud/deployment_infra/playbooks/configure-onap4k8s.yml | 10
-rw-r--r-- | kud/deployment_infra/playbooks/configure-ovn4nfv.yml | 4
-rw-r--r-- | kud/deployment_infra/playbooks/configure-qat.yml | 2
-rw-r--r-- | kud/deployment_infra/playbooks/configure-virtlet.yml | 2
-rw-r--r-- | kud/deployment_infra/playbooks/install_qat.sh | 2
-rw-r--r-- | kud/deployment_infra/playbooks/preconfigure-qat.yml | 162
-rwxr-xr-x | kud/hosting_providers/containerized/installer.sh | 19
-rw-r--r-- | kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml | 36
-rwxr-xr-x | kud/hosting_providers/vagrant/installer.sh | 7
-rw-r--r-- | kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml | 34
-rwxr-xr-x | kud/tests/qat.sh | 15
-rwxr-xr-x | kud/tests/sriov.sh | 13
14 files changed, 210 insertions, 123 deletions
diff --git a/kud/deployment_infra/images/nfd-master.yaml b/kud/deployment_infra/images/nfd-master.yaml
index 846bb753..4e07c2ed 100644
--- a/kud/deployment_infra/images/nfd-master.yaml
+++ b/kud/deployment_infra/images/nfd-master.yaml
@@ -37,6 +37,23 @@ subjects:
   name: nfd-master
   namespace: node-feature-discovery
 ---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: psp:default:privileged
+  namespace: node-feature-discovery
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp:privileged
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: node-feature-discovery
+- kind: ServiceAccount
+  name: nfd-master
+  namespace: node-feature-discovery
+---
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
diff --git a/kud/deployment_infra/playbooks/configure-emco.yml b/kud/deployment_infra/playbooks/configure-emco.yml
index 7a4cf926..96b4a23d 100644
--- a/kud/deployment_infra/playbooks/configure-emco.yml
+++ b/kud/deployment_infra/playbooks/configure-emco.yml
@@ -36,12 +36,20 @@
     - debug:
         var: make_all.stdout_lines
 
+    - name: Create emco namespace
+      shell: "/usr/local/bin/kubectl create namespace emco"
+      ignore_errors: True
+
+    - name: Create pod security policy role bindings
+      shell: "/usr/local/bin/kubectl -n emco create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=emco:default --serviceaccount=emco:emco-fluentd"
+      ignore_errors: True
+
     - name: Get cluster name
      shell: "kubectl -n kube-system get configmap/kubeadm-config -o yaml | grep clusterName: | awk '{print $2}'"
       register: cluster_name
 
     - name: Change the emco directory and run the command helm install
-      command: /usr/local/bin/helm install --namespace emco --create-namespace --set emco-tools.fluentd.clusterDomain={{ cluster_name.stdout }} emco dist/packages/emco-0.1.0.tgz
+      command: /usr/local/bin/helm install --namespace emco --set emco-tools.fluentd.clusterDomain={{ cluster_name.stdout }} emco dist/packages/emco-0.1.0.tgz
       register: helm_install
       args:
         chdir: /opt/multicloud/deployments/helm/v2/emco
diff --git a/kud/deployment_infra/playbooks/configure-onap4k8s.yml b/kud/deployment_infra/playbooks/configure-onap4k8s.yml
index c016cf1c..48052225 100644
--- a/kud/deployment_infra/playbooks/configure-onap4k8s.yml
+++ b/kud/deployment_infra/playbooks/configure-onap4k8s.yml
@@ -36,8 +36,16 @@
     - debug:
         var: make_all.stdout_lines
 
+    - name: Create onap4k8s-ns namespace
+      shell: "/usr/local/bin/kubectl create namespace onap4k8s-ns"
+      ignore_errors: True
+
+    - name: Create pod security policy role bindings
+      shell: "/usr/local/bin/kubectl -n onap4k8s-ns create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=onap4k8s-ns:default"
+      ignore_errors: True
+
     - name: Change the onap4k8s directory and run the command helm install
-      command: /usr/local/bin/helm install --namespace onap4k8s-ns --create-namespace --set service.type=NodePort multicloud-onap8ks dist/packages/multicloud-k8s-5.0.0.tgz
+      command: /usr/local/bin/helm install --namespace onap4k8s-ns --set service.type=NodePort multicloud-onap8ks dist/packages/multicloud-k8s-5.0.0.tgz
       register: helm_install
       args:
         chdir: /opt/multicloud/deployments/helm/onap4k8s
diff --git a/kud/deployment_infra/playbooks/configure-ovn4nfv.yml b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
index b335f8c8..7043bf53 100644
--- a/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
+++ b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
@@ -40,6 +40,10 @@
       shell: "/usr/local/bin/kubectl create namespace operator"
       ignore_errors: True
 
+    - name: create pod security policy role bindings
+      shell: "/usr/local/bin/kubectl -n operator create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=operator:default --serviceaccount=operator:k8s-nfn-sa"
+      ignore_errors: True
+
     - name: apply nfn operator label
       command: "/usr/local/bin/kubectl label node {{ item }} nfnType=operator --overwrite"
       with_inventory_hostnames: ovn-central
diff --git a/kud/deployment_infra/playbooks/configure-qat.yml b/kud/deployment_infra/playbooks/configure-qat.yml
index 1225b3d4..39f52403 100644
--- a/kud/deployment_infra/playbooks/configure-qat.yml
+++ b/kud/deployment_infra/playbooks/configure-qat.yml
@@ -11,5 +11,5 @@
 - import_playbook: preconfigure-qat.yml
 - hosts: localhost
   tasks:
-    - name: Apply QAT plugin previleges Daemonset
+    - name: Apply QAT plugin privileges Daemonset
       command: "/usr/local/bin/kubectl apply -f {{ playbook_dir }}/../images/qat_plugin_privileges.yaml"
diff --git a/kud/deployment_infra/playbooks/configure-virtlet.yml b/kud/deployment_infra/playbooks/configure-virtlet.yml
index d2461f73..6ba840ce 100644
--- a/kud/deployment_infra/playbooks/configure-virtlet.yml
+++ b/kud/deployment_infra/playbooks/configure-virtlet.yml
@@ -40,7 +40,7 @@
         - regexp: 'centos/(\d+)-(\d+)'
           url: 'https://cloud.centos.org/centos/$1/images/CentOS-$1-x86_64-GenericCloud-$2.qcow2'
         - name: fedora
-          url: https://dl.fedoraproject.org/pub/fedora/linux/releases/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2
+          url: https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2
       {% if lookup('env','http_proxy') != "" %}
         transports:
           "":
diff --git a/kud/deployment_infra/playbooks/install_qat.sh b/kud/deployment_infra/playbooks/install_qat.sh
index 57adb923..4a7fdef7 100644
--- a/kud/deployment_infra/playbooks/install_qat.sh
+++ b/kud/deployment_infra/playbooks/install_qat.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 # Precondition:
-# QAT device installed, such as lspci | grep 37c8
+# QAT device installed, such as lspci -n | grep 37c8
 # Enable grub with "intel_iommu=on iommu=pt"
 
 ROOT=
diff --git a/kud/deployment_infra/playbooks/preconfigure-qat.yml b/kud/deployment_infra/playbooks/preconfigure-qat.yml
index f5d797f1..ef8446f8 100644
--- a/kud/deployment_infra/playbooks/preconfigure-qat.yml
+++ b/kud/deployment_infra/playbooks/preconfigure-qat.yml
@@ -19,12 +19,10 @@
       file:
         state: directory
         path: "{{ qat_dest }}"
-    - name: Fetching QAT driver
-      block:
-      - name: Download QAT driver tarball
-        get_url:
-          url: "{{ qat_driver_url }}"
-          dest: "{{ qat_dest }}/{{ qat_package }}.tar.gz"
+    - name: Download QAT driver tarball
+      get_url:
+        url: "{{ qat_driver_url }}"
+        dest: "{{ qat_dest }}/{{ qat_package }}.tar.gz"
 
 - hosts: kube-node
   become: yes
@@ -33,21 +31,13 @@
       include_vars:
         file: kud-vars.yml
   tasks:
-    - name: Create a destination for driver folder in the target's /tmp
-      file:
-        state: directory
-        path: "{{ item }}"
-      with_items:
-        - "{{ base_dest }}/quick-assist/{{ qat_package }}"
-    - name: Create QAT dest folder
+    - name: Create destination folder for QAT check script
       file:
         state: directory
-        path: "qat"
-    - name: Register QAT env variable
-      shell: "echo {{ QAT_ENABLED | default(False) }}"
+        path: "{{ base_dest }}/qat"
     - name: Create QAT check script
       copy:
-        dest: "qat/qat.sh"
+        dest: "{{ base_dest }}/qat/qat.sh"
         content: |
           #!/bin/bash
           qat_device=$( for i in 0434 0435 37c8 6f54 19e2; \
                         do lspci -d 8086:$i -m; done |\
                         grep -i "Quick*" | head -n 1 | cut -d " " -f 5 )
@@ -59,15 +49,11 @@
           else
             echo "True"
           fi
-    - name: Changing perm of "sh", adding "+x"
-      shell: "chmod +x qat.sh"
-      args:
-        chdir: "qat"
-        warn: False
-    - name: Run the script and re-evaluate the variable.
-      command: "./qat.sh"
+        mode: 0755
+    - name: Run QAT check script and re-evaluate the variable
+      command: ./qat.sh
       args:
-        chdir: "qat"
+        chdir: "{{ base_dest }}/qat"
       register: output
     - debug:
         var: output.stdout_lines
@@ -75,70 +61,68 @@
         QAT_ENABLED: "{{ output.stdout }}"
     - debug:
         var: output
-    - name: Clean the script and folder.
+    - name: Clean QAT check script and folder
       file:
-        path: qat
+        path: "{{ base_dest }}/qat"
         state: absent
-    - name: bootstrap | install qat compilation packages
-      package:
-        name: "{{ item }}"
-        state: present
-      with_items:
-        - pciutils
-        - build-essential
-        - libudev-dev
-        - pkg-config
-      when: QAT_ENABLED
-    - copy:
-        src: "{{ qat_dest }}/{{ qat_package }}.tar.gz"
-        dest: "{{ base_dest }}/quick-assist"
-        remote_src: no
-      when: QAT_ENABLED
-    - name: Extract QAT source code
-      unarchive:
-        src: "{{ qat_dest }}/{{ qat_package }}.tar.gz"
-        dest: "{{ base_dest }}/quick-assist/{{ qat_package }}"
-      when: QAT_ENABLED
-    - name: Configure the target
-      command: ./configure --enable-icp-sriov=host
-      args:
-        chdir: "{{ base_dest }}/quick-assist/{{ qat_package }}"
-      when: QAT_ENABLED
-    - name: build qat driver
-      make:
-        chdir: "{{ base_dest }}/quick-assist/{{ qat_package }}"
-        target: "{{ item }}"
-      loop:
-        - clean
-        - uninstall
-        - install
-      when: QAT_ENABLED
-    - name: Create QAT driver folder in the target destination
-      file:
-        state: directory
-        path: "{{ item }}"
-      with_items:
-        - qat_driver_dest
-      when: QAT_ENABLED
-    - name: Copy QAT build directory qat target destination
-      command: "cp -r {{ base_dest }}/quick-assist/{{ qat_package }}/build/ /root/qat_driver_dest/"
-      when: QAT_ENABLED
-    - name: Copy QAT driver install script to target folder
-      command: "cp {{ playbook_dir }}/install_qat.sh /root/qat_driver_dest/build/install.sh"
-      when: QAT_ENABLED
-    - name: Copy QAT to target folder
-      command: "cp /etc/default/qat /root/qat_driver_dest/build"
-      when: QAT_ENABLED
-    - name: Changing perm of "install.sh", adding "+x"
-      file: dest=~/qat_driver_dest/build/install.sh mode=a+x
-      when: QAT_ENABLED
-    - name: Run a script with arguments
-      command: ./install.sh chdir=/root/qat_driver_dest/build
-      when: QAT_ENABLED
-    - name: get qat devices
-      shell: /usr/local/bin/adf_ctl status | grep up | awk '{print $4 substr($1, 4)}' | tr -d ','
-      register: qat_devices
-      when: QAT_ENABLED
-    - name: Updating the qat device SSL values to avoid duplication
-      command: "./substitute.sh chdir={{ playbook_dir }}"
+    - name: Install QAT driver
+      block:
+      - name: Install QAT compilation packages
+        package:
+          name: "{{ item }}"
+          state: present
+        with_items:
+          - pciutils
+          - build-essential
+          - libudev-dev
+          - pkg-config
+      - name: Create destination folder for QAT source code
+        file:
+          state: directory
+          path: "{{ qat_dest }}/{{ qat_package }}"
+      - name: Extract QAT source code
+        unarchive:
+          src: "{{ qat_dest }}/{{ qat_package }}.tar.gz"
+          dest: "{{ qat_dest }}/{{ qat_package }}"
+      - name: Configure the target
+        command: ./configure --enable-icp-sriov=host
+        args:
+          chdir: "{{ qat_dest }}/{{ qat_package }}"
+      - name: Build QAT driver
+        make:
+          chdir: "{{ qat_dest }}/{{ qat_package }}"
+          target: "{{ item }}"
+        loop:
+          - clean
+          - uninstall
+          - install
+      - name: Copy QAT driver install script to target folder
+        copy:
+          src: "install_qat.sh"
+          dest: "{{ qat_dest }}/{{ qat_package }}/build"
+          mode: 0755
+      - name: Copy /etc/default/qat to target folder
+        copy:
+          src: "/etc/default/qat"
+          dest: "{{ qat_dest }}/{{ qat_package }}/build"
+          remote_src: yes
+      - name: Run a script with arguments
+        command: ./install_qat.sh
+        args:
+          chdir: "{{ qat_dest }}/{{ qat_package }}/build"
+      - name: Copy QAT substitue script to target folder
+        copy:
+          src: "substitute.sh"
+          dest: "{{ qat_dest }}/{{ qat_package }}/build"
+          mode: 0755
+      - name: Update the QAT device SSL values to avoid duplication
+        command: ./substitute.sh
+        args:
+          chdir: "{{ qat_dest }}/{{ qat_package }}/build"
+      - name: Restart acceleration driver framework
+        command: adf_ctl restart
+      - name: Restart QAT service
+        service:
+          name: qat_service
+          state: restarted
       when: QAT_ENABLED
diff --git a/kud/hosting_providers/containerized/installer.sh b/kud/hosting_providers/containerized/installer.sh
index 226f4568..b2ec52af 100755
--- a/kud/hosting_providers/containerized/installer.sh
+++ b/kud/hosting_providers/containerized/installer.sh
@@ -118,38 +118,43 @@ function install_addons {
         $kud_infra_folder/galaxy-requirements.yml --ignore-errors
 
     ansible-playbook $verbose -i \
-        $kud_inventory $kud_playbooks/configure-kud.yml | \
+        $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml | \
         tee $cluster_log/setup-kud.log
 
     # The order of KUD_ADDONS is important: some plugins (sriov, qat)
     # require nfd to be enabled.
-    for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov cmk $plugins_name}; do
+    for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk $plugins_name}; do
         echo "Deploying $addon using configure-$addon.yml playbook.."
         ansible-playbook $verbose -i \
-            $kud_inventory $kud_playbooks/configure-${addon}.yml | \
+            $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-${addon}.yml | \
             tee $cluster_log/setup-${addon}.log
     done
     echo "Run the test cases if testing_enabled is set to true."
     if [[ "${testing_enabled}" == "true" ]]; then
-        for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov cmk $plugins_name}; do
+        failed_kud_tests=""
+        for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk $plugins_name}; do
             pushd $kud_tests
-            bash ${addon}.sh
+            bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
             case $addon in
                 "onap4k8s" )
                     echo "Test the onap4k8s plugin installation"
                     for functional_test in plugin_edgex plugin_fw plugin_eaa; do
-                        bash ${functional_test}.sh --external
+                        bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
                     done
                     ;;
                 "emco" )
                     echo "Test the emco plugin installation"
                     for functional_test in plugin_fw_v2; do
-                        bash ${functional_test}.sh --external
+                        bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
                     done
                     ;;
             esac
             popd
         done
+        if [[ ! -z "$failed_kud_tests" ]]; then
+            echo "Test cases failed:${failed_kud_tests}"
+            return 1
+        fi
     fi
     echo "Add-ons deployment complete..."
 }
diff --git a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
index 0a2953ce..18a55035 100644
--- a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
@@ -52,8 +52,6 @@ local_volume_provisioner_enabled: true
 # Helm deployment
 helm_enabled: true
 
-docker_version: 'latest'
-
 # Kube-proxy proxyMode configuration.
 # NOTE: Ipvs is based on netfilter hook function, but uses hash table as the underlying data structure and
 # works in the kernel space
@@ -81,3 +79,37 @@ kube_pods_subnet: 10.244.64.0/18
 
 # disable localdns cache
 enable_nodelocaldns: false
+
+# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
+podsecuritypolicy_enabled: true
+# The restricted spec is identical to the kubespray podsecuritypolicy_privileged_spec, with the replacement of
+# allowedCapabilities:
+# - '*'
+# by
+# requiredDropCapabilities:
+# - NET_RAW
+podsecuritypolicy_restricted_spec:
+  privileged: true
+  allowPrivilegeEscalation: true
+  volumes:
+  - '*'
+  hostNetwork: true
+  hostPorts:
+  - min: 0
+    max: 65535
+  hostIPC: true
+  hostPID: true
+  requiredDropCapabilities:
+  - NET_RAW
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'RunAsAny'
+  fsGroup:
+    rule: 'RunAsAny'
+  readOnlyRootFilesystem: false
+  # This will fail if allowed-unsafe-sysctls is not set accordingly in kubelet flags
+  allowedUnsafeSysctls:
+  - '*'
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
index 53164989..43638b4f 100755
--- a/kud/hosting_providers/vagrant/installer.sh
+++ b/kud/hosting_providers/vagrant/installer.sh
@@ -165,11 +165,16 @@ function install_addons {
     done
     echo "Run the test cases if testing_enabled is set to true."
     if [[ "${testing_enabled}" == "true" ]]; then
+        failed_kud_tests=""
        for addon in ${KUD_ADDONS:-multus topology-manager virtlet ovn4nfv nfd sriov qat optane cmk}; do
             pushd $kud_tests
-            bash ${addon}.sh
+            bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
             popd
         done
+        if [[ ! -z "$failed_kud_tests" ]]; then
+            echo "Test cases failed:${failed_kud_tests}"
+            return 1
+        fi
     fi
     echo "Add-ons deployment complete..."
 }
diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
index ba79b4b9..5b06b788 100644
--- a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
@@ -76,3 +76,37 @@ download_localhost: True
 kube_service_addresses: 10.244.0.0/18
 # Subnet for Pod IPs
 kube_pods_subnet: 10.244.64.0/18
+
+# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
+podsecuritypolicy_enabled: true
+# The restricted spec is identical to the kubespray podsecuritypolicy_privileged_spec, with the replacement of
+# allowedCapabilities:
+# - '*'
+# by
+# requiredDropCapabilities:
+# - NET_RAW
+podsecuritypolicy_restricted_spec:
+  privileged: true
+  allowPrivilegeEscalation: true
+  volumes:
+  - '*'
+  hostNetwork: true
+  hostPorts:
+  - min: 0
+    max: 65535
+  hostIPC: true
+  hostPID: true
+  requiredDropCapabilities:
+  - NET_RAW
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'RunAsAny'
+  fsGroup:
+    rule: 'RunAsAny'
+  readOnlyRootFilesystem: false
+  # This will fail if allowed-unsafe-sysctls is not set accordingly in kubelet flags
+  allowedUnsafeSysctls:
+  - '*'
diff --git a/kud/tests/qat.sh b/kud/tests/qat.sh
index 2f8d212a..8365f700 100755
--- a/kud/tests/qat.sh
+++ b/kud/tests/qat.sh
@@ -10,16 +10,13 @@
 set -o pipefail
 
-qat_device=$( for i in 0434 0435 37c8 6f54 19e2; \
-                   do lspci -d 8086:$i -m; done |\
-                   grep -i "Quick*" | head -n 1 | cut -d " " -f 5 )
-#Checking if the QAT device is on the node
-if [ -z "$qat_device" ]; then
-    echo "False. This test case cannot run. Qat device unavailable."
+qat_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.capacity."qat.intel.com/cy2_dc2">="1") | .metadata.name')
+if [ -z "$qat_capable_nodes" ]; then
+    echo "This test case cannot run. QAT device unavailable."
     QAT_ENABLED=False
     exit 0
 else
-    echo "True. Can run QAT on this device."
+    echo "Can run QAT on this cluster."
    QAT_ENABLED=True
 fi
 
@@ -78,9 +75,7 @@ kubectl create -f $HOME/$pod_name.yaml --validate=false
 
 allocated_node_resource=$(kubectl describe node | grep "qat.intel.com" | tail -n1 |awk '{print $(NF)}')
 echo "The allocated resource of the node is: " $allocated_node_resource
-adf_ctl restart
-systemctl restart qat_service
-kubectl exec -it pod-case-01 -- openssl engine -c -t qat
+kubectl exec pod-case-01 -- openssl engine -c -t qat
 kubectl delete pod $pod_name --now
 
 echo "Test complete."
diff --git a/kud/tests/sriov.sh b/kud/tests/sriov.sh
index 2dea576e..e617ea62 100755
--- a/kud/tests/sriov.sh
+++ b/kud/tests/sriov.sh
@@ -10,17 +10,12 @@
 set -o pipefail
 
-ethernet_adpator_version=$( lspci | grep "Ethernet Controller XL710" | head -n 1 | cut -d " " -f 8 )
-if [ -z "$ethernet_adpator_version" ]; then
-    echo " Ethernet adapator version is not set. SRIOV test case cannot run on this machine"
+sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.capacity."intel.com/intel_sriov_700">="2") | .metadata.name')
+if [ -z "$sriov_capable_nodes" ]; then
+    echo "SRIOV test case cannot run on the cluster."
     exit 0
-fi
-#checking for the right hardware version of NIC on the machine
-if [ $ethernet_adpator_version == "XL710" ]; then
-    echo "NIC card specs match. SRIOV option avaiable for this version."
 else
-    echo -e "Failed. The version supplied does not match.\nTest cannot be executed."
-    exit 0
+    echo "SRIOV option avaiable in the cluster."
 fi
 
 pod_name=pod-case-01
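
Note: the role bindings added across these playbooks all follow one pattern: once kubespray's podsecuritypolicy_enabled is turned on, each namespace whose workloads need privileges is bound to the kubespray-managed psp:privileged ClusterRole. The commands below are a minimal sketch of that pattern, not part of the commit; they assume a cluster deployed with the group_vars above (so the psp:privileged ClusterRole exists), and the namespace name demo-ns is only a placeholder.

    # Placeholder namespace used for illustration.
    ns=demo-ns

    # Grant the privileged PodSecurityPolicy to the namespace's default
    # service account, mirroring the rolebindings created by the playbooks.
    kubectl -n "$ns" create rolebinding psp:default:privileged \
        --clusterrole=psp:privileged \
        --serviceaccount="$ns:default"

    # Confirm the service account may now "use" the privileged policy.
    kubectl -n "$ns" auth can-i use podsecuritypolicy/privileged \
        --as="system:serviceaccount:$ns:default"

The same auth can-i check can be pointed at any service account named in the playbooks (for example emco:emco-fluentd or operator:k8s-nfn-sa) to verify the bindings took effect.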