Diffstat (limited to 'kud')
-rw-r--r--  kud/deployment_infra/galaxy-requirements.yml                             |   2
-rw-r--r--  kud/deployment_infra/images/multus-daemonset.yml                         |  62
-rw-r--r--  kud/deployment_infra/images/nfd-master.yaml                              |  17
-rw-r--r--  kud/deployment_infra/images/sriov-cni.yml                                |   9
-rw-r--r--  kud/deployment_infra/images/sriov-daemonset.yml                          |   9
-rw-r--r--  kud/deployment_infra/playbooks/configure-emco-reset.yml                  |  11
-rw-r--r--  kud/deployment_infra/playbooks/configure-emco.yml                        |  23
-rw-r--r--  kud/deployment_infra/playbooks/configure-kud.yml                         |   5
-rw-r--r--  kud/deployment_infra/playbooks/configure-onap4k8s-reset.yml              |  11
-rw-r--r--  kud/deployment_infra/playbooks/configure-onap4k8s.yml                    |  19
-rw-r--r--  kud/deployment_infra/playbooks/configure-optane.yml                      |   2
-rw-r--r--  kud/deployment_infra/playbooks/configure-ovn4nfv.yml                     |   4
-rw-r--r--  kud/deployment_infra/playbooks/configure-qat.yml                         |   2
-rw-r--r--  kud/deployment_infra/playbooks/configure-sriov.yml                       |  14
-rw-r--r--  kud/deployment_infra/playbooks/configure-virtlet.yml                     |   2
-rw-r--r--  kud/deployment_infra/playbooks/install_qat.sh                            |   2
-rw-r--r--  kud/deployment_infra/playbooks/kud-vars.yml                              |   8
-rw-r--r--  kud/deployment_infra/playbooks/preconfigure-kubespray.yml                |  19
-rw-r--r--  kud/deployment_infra/playbooks/preconfigure-optane.yml                   |  16
-rw-r--r--  kud/deployment_infra/playbooks/preconfigure-qat.yml                      | 162
-rw-r--r--  kud/deployment_infra/playbooks/preconfigure-sriov.yml                    | 150
-rwxr-xr-x  kud/hosting_providers/containerized/installer.sh                         |  33
-rw-r--r--  kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml |  39
-rwxr-xr-x  kud/hosting_providers/vagrant/installer.sh                               |  13
-rw-r--r--  kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml       |  37
-rwxr-xr-x  kud/tests/emco.sh                                                        |   2
-rwxr-xr-x  kud/tests/plugin_fw.sh                                                   |   4
-rwxr-xr-x  kud/tests/qat.sh                                                         |  15
-rwxr-xr-x  kud/tests/sriov.sh                                                       |  13
-rwxr-xr-x  kud/tests/topology-manager.sh                                            |   5
30 files changed, 409 insertions(+), 301 deletions(-)
diff --git a/kud/deployment_infra/galaxy-requirements.yml b/kud/deployment_infra/galaxy-requirements.yml
index 3191dc19..9747dc99 100644
--- a/kud/deployment_infra/galaxy-requirements.yml
+++ b/kud/deployment_infra/galaxy-requirements.yml
@@ -10,6 +10,6 @@
- src: andrewrothstein.go
version: v2.1.15
- src: andrewrothstein.kubernetes-helm
- version: v1.2.17
+ version: v1.3.16
- src: geerlingguy.docker
version: 2.5.2
diff --git a/kud/deployment_infra/images/multus-daemonset.yml b/kud/deployment_infra/images/multus-daemonset.yml
index d6d8d533..09759360 100644
--- a/kud/deployment_infra/images/multus-daemonset.yml
+++ b/kud/deployment_infra/images/multus-daemonset.yml
@@ -1,11 +1,10 @@
---
-apiVersion: apiextensions.k8s.io/v1beta1
+apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
- version: v1
scope: Namespaced
names:
plural: network-attachment-definitions
@@ -13,16 +12,27 @@ spec:
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
- validation:
- openAPIV3Schema:
- properties:
- spec:
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ schema:
+ openAPIV3Schema:
+ description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
+ Working Group to express the intent for attaching pods to one or more logical or physical
+ networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
+ type: object
properties:
- config:
- type: string
+ spec:
+ description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
+ type: object
+ properties:
+ config:
+ description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
+ type: string
---
kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
rules:
@@ -39,9 +49,18 @@ rules:
verbs:
- get
- update
+ - apiGroups:
+ - ""
+ - events.k8s.io
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
---
kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
roleRef:
@@ -68,11 +87,18 @@ metadata:
tier: node
app: multus
data:
+ # NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
+ # To customize the Multus installation, change the arguments to the Multus pod:
+ # change the "args" line below from
+ # - "--multus-conf-file=auto"
+ # to:
+ # "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
+ # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the
+ # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet.
cni-conf.json: |
{
"name": "multus-cni-network",
"type": "multus",
- "cniVersion": "0.3.1",
"capabilities": {
"portMappings": true
},
@@ -109,11 +135,11 @@ metadata:
labels:
tier: node
app: multus
+ name: multus
spec:
selector:
matchLabels:
- tier: node
- app: multus
+ name: multus
updateStrategy:
type: RollingUpdate
template:
@@ -121,20 +147,22 @@ spec:
labels:
tier: node
app: multus
+ name: multus
spec:
hostNetwork: true
nodeSelector:
- beta.kubernetes.io/arch: amd64
+ kubernetes.io/arch: amd64
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
- image: nfvpe/multus:v3.3-tp
+ image: integratedcloudnative/multus:v3.4.1-tp
command: ["/entrypoint.sh"]
args:
- - "--multus-conf-file=/tmp/multus-conf/00-multus.conf"
+ - "--multus-conf-file=auto"
+ - "--cni-version=0.3.1"
resources:
requests:
cpu: "100m"
@@ -163,4 +191,4 @@ spec:
name: multus-cni-config
items:
- key: cni-conf.json
- path: 00-multus.conf
+ path: 70-multus.conf
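Note: the kubelet reads the lexicographically first configuration file in /etc/cni/net.d, which is why the manual-configuration path above uses the name 70-multus.conf. A quick way to check the ordering on a node (a sketch; assumes shell access to the node):

    # The kubelet uses the first file in sort order, so verify where the
    # Multus config lands relative to the other CNI plugin configs.
    ls -1 /etc/cni/net.d/ | sort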
diff --git a/kud/deployment_infra/images/nfd-master.yaml b/kud/deployment_infra/images/nfd-master.yaml
index 846bb753..4e07c2ed 100644
--- a/kud/deployment_infra/images/nfd-master.yaml
+++ b/kud/deployment_infra/images/nfd-master.yaml
@@ -37,6 +37,23 @@ subjects:
name: nfd-master
namespace: node-feature-discovery
---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: psp:default:privileged
+ namespace: node-feature-discovery
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:privileged
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: node-feature-discovery
+- kind: ServiceAccount
+ name: nfd-master
+ namespace: node-feature-discovery
+---
apiVersion: apps/v1
kind: DaemonSet
metadata:
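Note: the new RoleBinding lets the NFD service accounts run under the privileged pod security policy (the psp:privileged ClusterRole comes from the kubespray PSP support enabled further down in this change). A spot check after applying, assuming kubectl access:

    # Confirm the binding exists in the node-feature-discovery namespace.
    kubectl -n node-feature-discovery get rolebinding psp:default:privileged
    # Verify the nfd-master service account may use the privileged PSP.
    kubectl auth can-i use podsecuritypolicy/privileged \
        --as=system:serviceaccount:node-feature-discovery:nfd-master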
diff --git a/kud/deployment_infra/images/sriov-cni.yml b/kud/deployment_infra/images/sriov-cni.yml
index 7503b872..570b00ee 100644
--- a/kud/deployment_infra/images/sriov-cni.yml
+++ b/kud/deployment_infra/images/sriov-cni.yml
@@ -21,6 +21,15 @@ spec:
tier: node
app: sriov-cni
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: feature.node.kubernetes.io/network-sriov.capable
+ operator: In
+ values:
+ - "true"
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
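Note: with this nodeAffinity the SRIOV CNI pods only schedule onto nodes that node-feature-discovery has labeled SR-IOV capable (sriov-daemonset.yml below adds the same term). To list the matching nodes, assuming NFD is already deployed:

    # Nodes carrying the NFD SR-IOV capability label used by the affinity rule.
    kubectl get nodes -l feature.node.kubernetes.io/network-sriov.capable=true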
diff --git a/kud/deployment_infra/images/sriov-daemonset.yml b/kud/deployment_infra/images/sriov-daemonset.yml
index e392028d..41b1cbaa 100644
--- a/kud/deployment_infra/images/sriov-daemonset.yml
+++ b/kud/deployment_infra/images/sriov-daemonset.yml
@@ -46,6 +46,15 @@ spec:
tier: node
app: sriovdp
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: feature.node.kubernetes.io/network-sriov.capable
+ operator: In
+ values:
+ - "true"
hostNetwork: true
hostPID: true
nodeSelector:
diff --git a/kud/deployment_infra/playbooks/configure-emco-reset.yml b/kud/deployment_infra/playbooks/configure-emco-reset.yml
index a4560cc2..7cad36e4 100644
--- a/kud/deployment_infra/playbooks/configure-emco-reset.yml
+++ b/kud/deployment_infra/playbooks/configure-emco-reset.yml
@@ -15,7 +15,7 @@
file: kud-vars.yml
- name: Change the emco directory and run helm delete
- command: /usr/local/bin/helm delete --purge emco
+ command: /usr/local/bin/helm uninstall --namespace emco emco
register: helm_delete
args:
chdir: /opt/multicloud/deployments/helm/v2/emco
@@ -41,15 +41,6 @@
- debug:
var: make_clean.stdout_lines
- - name: Change the emco directory and make repo-stop
- command: /usr/bin/make repo-stop
- register: make_repo_stop
- args:
- chdir: /opt/multicloud/deployments/helm/v2/emco
-
- - debug:
- var: make_repo_stop.stdout_lines
-
- name: clean multicloud-k8s path
file:
state: absent
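Note: this is the Helm 3 form of the old Helm 2 "delete --purge". Releases are namespaced in Helm 3, so the namespace must be passed explicitly (the onap4k8s reset below follows the same pattern):

    # Helm 3: remove the emco release from the emco namespace.
    helm uninstall --namespace emco emco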
diff --git a/kud/deployment_infra/playbooks/configure-emco.yml b/kud/deployment_infra/playbooks/configure-emco.yml
index 255ce6a4..96b4a23d 100644
--- a/kud/deployment_infra/playbooks/configure-emco.yml
+++ b/kud/deployment_infra/playbooks/configure-emco.yml
@@ -27,15 +27,6 @@
yum: name=make state=present update_cache=yes
when: ansible_distribution == "CentOS"
- - name: Change the emco directory and run the command make repo
- command: /usr/bin/make repo
- register: make_repo
- args:
- chdir: /opt/multicloud/deployments/helm/v2/emco
-
- - debug:
- var: make_repo.stdout_lines
-
- name: Change the emco directory and run the command make all
command: /usr/bin/make all
register: make_all
@@ -45,8 +36,20 @@
- debug:
var: make_all.stdout_lines
+ - name: Create emco namespace
+ shell: "/usr/local/bin/kubectl create namespace emco"
+ ignore_errors: True
+
+ - name: Create pod security policy role bindings
+ shell: "/usr/local/bin/kubectl -n emco create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=emco:default --serviceaccount=emco:emco-fluentd"
+ ignore_errors: True
+
+ - name: Get cluster name
+ shell: "kubectl -n kube-system get configmap/kubeadm-config -o yaml | grep clusterName: | awk '{print $2}'"
+ register: cluster_name
+
- name: Change the emco directory and run the command helm install
- command: /usr/local/bin/helm install dist/packages/emco-0.1.0.tgz --name emco --namespace emco
+ command: /usr/local/bin/helm install --namespace emco --set emco-tools.fluentd.clusterDomain={{ cluster_name.stdout }} emco dist/packages/emco-0.1.0.tgz
register: helm_install
args:
chdir: /opt/multicloud/deployments/helm/v2/emco
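Note: the fluentd clusterDomain override is derived from the kubeadm configuration at install time. The same lookup can be run by hand (assumes a kubeadm-provisioned cluster, as kubespray creates here):

    # Read clusterName from the kubeadm-config ConfigMap, as the play does.
    kubectl -n kube-system get configmap kubeadm-config -o yaml \
        | grep 'clusterName:' | awk '{print $2}'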
diff --git a/kud/deployment_infra/playbooks/configure-kud.yml b/kud/deployment_infra/playbooks/configure-kud.yml
index 6ac0477d..0e32e69d 100644
--- a/kud/deployment_infra/playbooks/configure-kud.yml
+++ b/kud/deployment_infra/playbooks/configure-kud.yml
@@ -23,11 +23,6 @@
when: helm_client.rc != 0
vars:
kubernetes_helm_ver: "v{{ helm_client_version }}"
- tasks:
- - name: Initialize helm client
- command: helm init -c
- args:
- creates: ~/.helm
- hosts: kube-node
become: yes
diff --git a/kud/deployment_infra/playbooks/configure-onap4k8s-reset.yml b/kud/deployment_infra/playbooks/configure-onap4k8s-reset.yml
index d6f2f6bc..ddfedbb4 100644
--- a/kud/deployment_infra/playbooks/configure-onap4k8s-reset.yml
+++ b/kud/deployment_infra/playbooks/configure-onap4k8s-reset.yml
@@ -15,7 +15,7 @@
file: kud-vars.yml
- name: Change the onap4k8s directory and run helm delete
- command: /usr/local/bin/helm delete --purge multicloud-onap8ks
+ command: /usr/local/bin/helm uninstall --namespace onap4k8s-ns multicloud-onap8ks
register: helm_delete
args:
chdir: /opt/multicloud/deployments/helm/onap4k8s
@@ -41,15 +41,6 @@
- debug:
var: make_clean.stdout_lines
- - name: Change the onap4k8s directory and make repo-stop
- command: /usr/bin/make repo-stop
- register: make_repo_stop
- args:
- chdir: /opt/multicloud/deployments/helm/onap4k8s
-
- - debug:
- var: make_repo_stop.stdout_lines
-
- name: clean multicloud-k8s path
file:
state: absent
diff --git a/kud/deployment_infra/playbooks/configure-onap4k8s.yml b/kud/deployment_infra/playbooks/configure-onap4k8s.yml
index 11729171..48052225 100644
--- a/kud/deployment_infra/playbooks/configure-onap4k8s.yml
+++ b/kud/deployment_infra/playbooks/configure-onap4k8s.yml
@@ -27,15 +27,6 @@
yum: name=make state=present update_cache=yes
when: ansible_distribution == "CentOS"
- - name: Change the onap4k8s directory and run the command make repo
- command: /usr/bin/make repo
- register: make_repo
- args:
- chdir: /opt/multicloud/deployments/helm/onap4k8s
-
- - debug:
- var: make_repo.stdout_lines
-
- name: Change the onap4k8s directory and run the command make all
command: /usr/bin/make all
register: make_all
@@ -45,8 +36,16 @@
- debug:
var: make_all.stdout_lines
+ - name: Create onap4k8s-ns namespace
+ shell: "/usr/local/bin/kubectl create namespace onap4k8s-ns"
+ ignore_errors: True
+
+ - name: Create pod security policy role bindings
+ shell: "/usr/local/bin/kubectl -n onap4k8s-ns create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=onap4k8s-ns:default"
+ ignore_errors: True
+
- name: Change the onap4k8s directory and run the command helm install
- command: /usr/local/bin/helm install dist/packages/multicloud-k8s-5.0.0.tgz --name multicloud-onap8ks --namespace onap4k8s-ns --set service.type=NodePort
+ command: /usr/local/bin/helm install --namespace onap4k8s-ns --set service.type=NodePort multicloud-onap8ks dist/packages/multicloud-k8s-5.0.0.tgz
register: helm_install
args:
chdir: /opt/multicloud/deployments/helm/onap4k8s
diff --git a/kud/deployment_infra/playbooks/configure-optane.yml b/kud/deployment_infra/playbooks/configure-optane.yml
index 8e000aa4..01189808 100644
--- a/kud/deployment_infra/playbooks/configure-optane.yml
+++ b/kud/deployment_infra/playbooks/configure-optane.yml
@@ -12,4 +12,4 @@
- hosts: localhost
tasks:
- name: Apply Optane PMEM CSI Daemonset
- command: "{{ base_dest }}/optane/deploy_optane.sh"
+ command: "{{ optane_dest }}/deploy_optane.sh"
diff --git a/kud/deployment_infra/playbooks/configure-ovn4nfv.yml b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
index b335f8c8..7043bf53 100644
--- a/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
+++ b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
@@ -40,6 +40,10 @@
shell: "/usr/local/bin/kubectl create namespace operator"
ignore_errors: True
+ - name: create pod security policy role bindings
+ shell: "/usr/local/bin/kubectl -n operator create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=operator:default --serviceaccount=operator:k8s-nfn-sa"
+ ignore_errors: True
+
- name: apply nfn operator label
command: "/usr/local/bin/kubectl label node {{ item }} nfnType=operator --overwrite"
with_inventory_hostnames: ovn-central
diff --git a/kud/deployment_infra/playbooks/configure-qat.yml b/kud/deployment_infra/playbooks/configure-qat.yml
index 1225b3d4..39f52403 100644
--- a/kud/deployment_infra/playbooks/configure-qat.yml
+++ b/kud/deployment_infra/playbooks/configure-qat.yml
@@ -11,5 +11,5 @@
- import_playbook: preconfigure-qat.yml
- hosts: localhost
tasks:
- - name: Apply QAT plugin previleges Daemonset
+ - name: Apply QAT plugin privileges Daemonset
command: "/usr/local/bin/kubectl apply -f {{ playbook_dir }}/../images/qat_plugin_privileges.yaml"
diff --git a/kud/deployment_infra/playbooks/configure-sriov.yml b/kud/deployment_infra/playbooks/configure-sriov.yml
index 45f276c6..c0b7c9e0 100644
--- a/kud/deployment_infra/playbooks/configure-sriov.yml
+++ b/kud/deployment_infra/playbooks/configure-sriov.yml
@@ -9,21 +9,19 @@
##############################################################################
- import_playbook: preconfigure-sriov.yml
-
- hosts: localhost
- become: yes
+ vars:
+ sriov_enabled: "{{ groups['kube-node'] | map('extract', hostvars, ['SRIOV_ENABLED']) | select() | list | length > 0 }}"
tasks:
- - debug:
- var: SRIOV_NODE
- name: Apply Multus
shell: "/usr/local/bin/kubectl apply -f {{ playbook_dir }}/../images/multus-daemonset.yml"
- when: SRIOV_NODE
+ when: sriov_enabled
- name: Apply SRIOV CNI
shell: "/usr/local/bin/kubectl apply -f {{ playbook_dir }}/../images/sriov-cni.yml"
- when: SRIOV_NODE
+ when: sriov_enabled
- name: Apply SRIOV DaemonSet
shell: "/usr/local/bin/kubectl apply -f {{ playbook_dir }}/../images/sriov-daemonset.yml"
- when: SRIOV_NODE
+ when: sriov_enabled
- name: Apply SRIOV Network Attachment definition
shell: "/usr/local/bin/kubectl apply -f {{ playbook_dir }}/sriov-nad.yml"
- when: SRIOV_NODE
+ when: sriov_enabled
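Note: the sriov_enabled expression collapses the per-node SRIOV_ENABLED facts into one boolean for the localhost play: it extracts the fact from every kube-node host, keeps only values that evaluate to true, and tests whether any remain. Each node's fact in turn comes from the hardware check script, which can be run directly on a node to see its value (path as in this repo):

    # Prints True when an SR-IOV capable NIC is present on the node.
    bash kud/deployment_infra/playbooks/sriov_hardware_check.sh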
diff --git a/kud/deployment_infra/playbooks/configure-virtlet.yml b/kud/deployment_infra/playbooks/configure-virtlet.yml
index d2461f73..6ba840ce 100644
--- a/kud/deployment_infra/playbooks/configure-virtlet.yml
+++ b/kud/deployment_infra/playbooks/configure-virtlet.yml
@@ -40,7 +40,7 @@
- regexp: 'centos/(\d+)-(\d+)'
url: 'https://cloud.centos.org/centos/$1/images/CentOS-$1-x86_64-GenericCloud-$2.qcow2'
- name: fedora
- url: https://dl.fedoraproject.org/pub/fedora/linux/releases/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2
+ url: https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2
{% if lookup('env','http_proxy') != "" %}
transports:
"":
diff --git a/kud/deployment_infra/playbooks/install_qat.sh b/kud/deployment_infra/playbooks/install_qat.sh
index 57adb923..4a7fdef7 100644
--- a/kud/deployment_infra/playbooks/install_qat.sh
+++ b/kud/deployment_infra/playbooks/install_qat.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# Precondition:
-# QAT device installed, such as lspci | grep 37c8
+# QAT device installed, such as lspci -n | grep 37c8
# Enable grub with "intel_iommu=on iommu=pt"
ROOT=
diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
index 30e54f03..51607020 100644
--- a/kud/deployment_infra/playbooks/kud-vars.yml
+++ b/kud/deployment_infra/playbooks/kud-vars.yml
@@ -57,12 +57,14 @@ cmk_untaint_required: true
#cmk_exclusive_mode: packed # choose between: packed, spread, default: packed
go_version: '1.12.5'
-kubespray_version: 2.12.6
-helm_client_version: 2.13.1
+kubespray_version: 2.14.1
+# This matches the helm_version from kubespray defaults
+helm_client_version: 3.2.4
# kud playbooks not compatible with 2.8.0 - see MULTICLOUD-634
ansible_version: 2.9.7
-sriov_dest: "{{ base_dest }}/sriov"
+sriov_pkgs: make,gcc
+sriov_dest: "{{ base_dest }}/sriov_driver"
sriov_driver_source_type: "tarball"
sriov_driver_version: 3.7.34
sriov_driver_url: "https://downloadmirror.intel.com/28943/eng/iavf-{{ sriov_driver_version }}.tar.gz"
diff --git a/kud/deployment_infra/playbooks/preconfigure-kubespray.yml b/kud/deployment_infra/playbooks/preconfigure-kubespray.yml
new file mode 100644
index 00000000..78e7eda6
--- /dev/null
+++ b/kud/deployment_infra/playbooks/preconfigure-kubespray.yml
@@ -0,0 +1,19 @@
+---
+# The mitogen module used in kubespray requires python2 on the nodes.
+# On some distributions (i.e. Ubuntu 18.04), the default version of
+# python is python3.
+#
+# When python2 is not present a failure message similar to "bash:
+# /usr/bin/python: No such file or directory" will be reported.
+#
+# Note the use of "strategy: linear" below to temporarily bypass
+# mitogen.
+#
+- name: Install python2
+ hosts: k8s-cluster
+ strategy: linear
+ tasks:
+ - name: Install python2
+ package:
+ name: python
+ state: present
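Note: the nodes can be probed for a python interpreter ahead of the kubespray run using the raw module, which works even when no python is present ($kud_inventory here is the inventory variable used by installer.sh):

    # 'raw' bypasses Ansible's module subsystem, so it works on python-less nodes.
    ansible k8s-cluster -i $kud_inventory -m raw -a "python --version" --become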
diff --git a/kud/deployment_infra/playbooks/preconfigure-optane.yml b/kud/deployment_infra/playbooks/preconfigure-optane.yml
index 64622895..135371ea 100644
--- a/kud/deployment_infra/playbooks/preconfigure-optane.yml
+++ b/kud/deployment_infra/playbooks/preconfigure-optane.yml
@@ -22,6 +22,14 @@
state: directory
path: "{{ optane_dest }}"
ignore_errors: yes
+ - copy:
+ src: "{{ playbook_dir }}/deploy_optane.sh"
+ dest: "{{ optane_dest }}"
+ - name: Changing perm of "sh", adding "+x"
+ shell: "chmod +x deploy_optane.sh"
+ args:
+ chdir: "{{ optane_dest }}"
+ warn: False
- hosts: kube-node
become: yes
@@ -61,14 +69,6 @@
chdir: "optane"
warn: False
- copy:
- src: "{{ playbook_dir }}/deploy_optane.sh"
- dest: optane
- - name: Changing perm of "sh", adding "+x"
- shell: "chmod +x deploy_optane.sh"
- args:
- chdir: "optane"
- warn: False
- - copy:
src: "{{ playbook_dir }}/../images/pmem-csi-lvm.yaml"
dest: optane
- copy:
diff --git a/kud/deployment_infra/playbooks/preconfigure-qat.yml b/kud/deployment_infra/playbooks/preconfigure-qat.yml
index f5d797f1..ef8446f8 100644
--- a/kud/deployment_infra/playbooks/preconfigure-qat.yml
+++ b/kud/deployment_infra/playbooks/preconfigure-qat.yml
@@ -19,12 +19,10 @@
file:
state: directory
path: "{{ qat_dest }}"
- - name: Fetching QAT driver
- block:
- - name: Download QAT driver tarball
- get_url:
- url: "{{ qat_driver_url }}"
- dest: "{{ qat_dest }}/{{ qat_package }}.tar.gz"
+ - name: Download QAT driver tarball
+ get_url:
+ url: "{{ qat_driver_url }}"
+ dest: "{{ qat_dest }}/{{ qat_package }}.tar.gz"
- hosts: kube-node
become: yes
@@ -33,21 +31,13 @@
include_vars:
file: kud-vars.yml
tasks:
- - name: Create a destination for driver folder in the target's /tmp
- file:
- state: directory
- path: "{{ item }}"
- with_items:
- - "{{ base_dest }}/quick-assist/{{ qat_package }}"
- - name: Create QAT dest folder
+ - name: Create destination folder for QAT check script
file:
state: directory
- path: "qat"
- - name: Register QAT env variable
- shell: "echo {{ QAT_ENABLED | default(False) }}"
+ path: "{{ base_dest }}/qat"
- name: Create QAT check script
copy:
- dest: "qat/qat.sh"
+ dest: "{{ base_dest }}/qat/qat.sh"
content: |
#!/bin/bash
qat_device=$( for i in 0434 0435 37c8 6f54 19e2; \
@@ -59,15 +49,11 @@
else
echo "True"
fi
- - name: Changing perm of "sh", adding "+x"
- shell: "chmod +x qat.sh"
- args:
- chdir: "qat"
- warn: False
- - name: Run the script and re-evaluate the variable.
- command: "./qat.sh"
+ mode: 0755
+ - name: Run QAT check script and re-evaluate the variable
+ command: ./qat.sh
args:
- chdir: "qat"
+ chdir: "{{ base_dest }}/qat"
register: output
- debug:
var: output.stdout_lines
@@ -75,70 +61,68 @@
QAT_ENABLED: "{{ output.stdout }}"
- debug:
var: output
- - name: Clean the script and folder.
+ - name: Clean QAT check script and folder
file:
- path: qat
+ path: "{{ base_dest }}/qat"
state: absent
- - name: bootstrap | install qat compilation packages
- package:
- name: "{{ item }}"
- state: present
- with_items:
- - pciutils
- - build-essential
- - libudev-dev
- - pkg-config
- when: QAT_ENABLED
- - copy:
- src: "{{ qat_dest }}/{{ qat_package }}.tar.gz"
- dest: "{{ base_dest }}/quick-assist"
- remote_src: no
- when: QAT_ENABLED
- - name: Extract QAT source code
- unarchive:
- src: "{{ qat_dest }}/{{ qat_package }}.tar.gz"
- dest: "{{ base_dest }}/quick-assist/{{ qat_package }}"
- when: QAT_ENABLED
- - name: Configure the target
- command: ./configure --enable-icp-sriov=host
- args:
- chdir: "{{ base_dest }}/quick-assist/{{ qat_package }}"
- when: QAT_ENABLED
- - name: build qat driver
- make:
- chdir: "{{ base_dest }}/quick-assist/{{ qat_package }}"
- target: "{{ item }}"
- loop:
- - clean
- - uninstall
- - install
- when: QAT_ENABLED
- - name: Create QAT driver folder in the target destination
- file:
- state: directory
- path: "{{ item }}"
- with_items:
- - qat_driver_dest
- when: QAT_ENABLED
- - name: Copy QAT build directory qat target destination
- command: "cp -r {{ base_dest }}/quick-assist/{{ qat_package }}/build/ /root/qat_driver_dest/"
- when: QAT_ENABLED
- - name: Copy QAT driver install script to target folder
- command: "cp {{ playbook_dir }}/install_qat.sh /root/qat_driver_dest/build/install.sh"
- when: QAT_ENABLED
- - name: Copy QAT to target folder
- command: "cp /etc/default/qat /root/qat_driver_dest/build"
- when: QAT_ENABLED
- - name: Changing perm of "install.sh", adding "+x"
- file: dest=~/qat_driver_dest/build/install.sh mode=a+x
- when: QAT_ENABLED
- - name: Run a script with arguments
- command: ./install.sh chdir=/root/qat_driver_dest/build
- when: QAT_ENABLED
- - name: get qat devices
- shell: /usr/local/bin/adf_ctl status | grep up | awk '{print $4 substr($1, 4)}' | tr -d ','
- register: qat_devices
- when: QAT_ENABLED
- - name: Updating the qat device SSL values to avoid duplication
- command: "./substitute.sh chdir={{ playbook_dir }}"
+ - name: Install QAT driver
+ block:
+ - name: Install QAT compilation packages
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - pciutils
+ - build-essential
+ - libudev-dev
+ - pkg-config
+ - name: Create destination folder for QAT source code
+ file:
+ state: directory
+ path: "{{ qat_dest }}/{{ qat_package }}"
+ - name: Extract QAT source code
+ unarchive:
+ src: "{{ qat_dest }}/{{ qat_package }}.tar.gz"
+ dest: "{{ qat_dest }}/{{ qat_package }}"
+ - name: Configure the target
+ command: ./configure --enable-icp-sriov=host
+ args:
+ chdir: "{{ qat_dest }}/{{ qat_package }}"
+ - name: Build QAT driver
+ make:
+ chdir: "{{ qat_dest }}/{{ qat_package }}"
+ target: "{{ item }}"
+ loop:
+ - clean
+ - uninstall
+ - install
+ - name: Copy QAT driver install script to target folder
+ copy:
+ src: "install_qat.sh"
+ dest: "{{ qat_dest }}/{{ qat_package }}/build"
+ mode: 0755
+ - name: Copy /etc/default/qat to target folder
+ copy:
+ src: "/etc/default/qat"
+ dest: "{{ qat_dest }}/{{ qat_package }}/build"
+ remote_src: yes
+ - name: Run the QAT driver install script
+ command: ./install_qat.sh
+ args:
+ chdir: "{{ qat_dest }}/{{ qat_package }}/build"
+ - name: Copy QAT substitute script to target folder
+ copy:
+ src: "substitute.sh"
+ dest: "{{ qat_dest }}/{{ qat_package }}/build"
+ mode: 0755
+ - name: Update the QAT device SSL values to avoid duplication
+ command: ./substitute.sh
+ args:
+ chdir: "{{ qat_dest }}/{{ qat_package }}/build"
+ - name: Restart acceleration driver framework
+ command: adf_ctl restart
+ - name: Restart QAT service
+ service:
+ name: qat_service
+ state: restarted
when: QAT_ENABLED
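Note: after the driver build, adf_ctl and the qat_service restart bring the acceleration endpoints back up. A sanity check on the node (the same status query the removed device-enumeration task used):

    # Every QAT endpoint should report state: up after the restart.
    adf_ctl status | grep up
    systemctl is-active qat_service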
diff --git a/kud/deployment_infra/playbooks/preconfigure-sriov.yml b/kud/deployment_infra/playbooks/preconfigure-sriov.yml
index 4c633ced..8c95aae8 100644
--- a/kud/deployment_infra/playbooks/preconfigure-sriov.yml
+++ b/kud/deployment_infra/playbooks/preconfigure-sriov.yml
@@ -8,111 +8,101 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- hosts: kube-node
- become: yes
- pre_tasks:
- - name: Create SRIOV driver folder in the target destination
- file:
- state: directory
- path: "{{ item }}"
- with_items:
- - sriov
- - copy:
- src: "{{ playbook_dir }}/sriov_hardware_check.sh"
- dest: sriov
- - name: Changing perm of "sh", adding "+x"
- shell: "chmod +x sriov_hardware_check.sh"
- args:
- chdir: "sriov"
- warn: False
- - name: Register SRIOV
- shell: "echo {{ SRIOV | default(False) }}"
- - name: Run the script and Re-evaluate the variable
- command: sriov/sriov_hardware_check.sh
- register: output
- - set_fact:
- _SRIOV: "{{ output.stdout }}"
- - name: Recreate the conf file for every host
- file:
- path: /tmp/sriov.conf
- state: absent
- delegate_to: localhost
- - lineinfile : >
- dest=/tmp/sriov.conf
- create=yes
- line='{{_SRIOV}}'
- delegate_to: localhost
- - name: Clean the script and folder.
- file:
- path: sriov
- state: absent
-
-# Run the following task only if the SRIOV is set to True
-# i.e when SRIOV hardware is available
- hosts: localhost
become: yes
pre_tasks:
- - name: Read SRIOV value from the conf file.
- command: cat /tmp/sriov.conf
- register: installer_output
- become: yes
- - set_fact:
- SRIOV_NODE: "{{ installer_output.stdout }}"
- name: Load kud variables
include_vars:
file: kud-vars.yml
- when: SRIOV_NODE
tasks:
- - name: Create sriov folder
+ - name: Create SRIOV dest folder
file:
state: directory
path: "{{ sriov_dest }}"
- ignore_errors: yes
- when: SRIOV_NODE
- - name: Get SRIOV compatible driver
- get_url: "url={{ sriov_driver_url }} dest=/tmp/{{ sriov_package }}.tar.gz"
- when: SRIOV_NODE
- - name: Extract sriov source code
- unarchive:
- src: "/tmp/{{ sriov_package }}.tar.gz"
- dest: "{{ sriov_dest }}"
- when: SRIOV_NODE
- - name: Build the default target
- make:
- chdir: "{{ sriov_dest }}/{{ sriov_package }}/src"
- become: yes
- when: SRIOV_NODE
-# Copy all the driver and install script into target node
+ - name: Fetching SRIOV driver
+ block:
+ - name: Download SRIOV driver tarball
+ get_url:
+ url: "{{ sriov_driver_url }}"
+ dest: "{{ sriov_dest }}/{{ sriov_package }}.tar.gz"
+
- hosts: kube-node
become: yes
pre_tasks:
- name: Load kud variables
include_vars:
file: kud-vars.yml
- when: _SRIOV
tasks:
- - name: create SRIOV driver folder in the target destination
+ - name: Create a destination for driver folder in the target's /tmp
+ file:
+ state: directory
+ path: "{{ item }}"
+ with_items:
+ - "{{ base_dest }}/sriov/{{ sriov_package }}"
+ - name: Create SRIOV dest folder
+ file:
+ state: directory
+ path: "sriov"
+ - name: Register SRIOV env variable
+ shell: "echo {{ SRIOV_ENABLED | default(False) }}"
+ - name: Copy SRIOV check script to target
+ copy:
+ src: "{{ playbook_dir }}/sriov_hardware_check.sh"
+ dest: sriov
+ mode: 0755
+ - name: Run the script and re-evaluate the variable
+ command: "sriov/sriov_hardware_check.sh"
+ register: output
+ - debug:
+ var: output.stdout_lines
+ - set_fact:
+ SRIOV_ENABLED: "{{ output.stdout }}"
+ - debug:
+ var: output
+ - name: Clean the script and folder
+ file:
+ path: sriov
+ state: absent
+ - name: Install SRIOV compilation packages
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ sriov_pkgs }}"
+ when: SRIOV_ENABLED
+ - name: Extract SRIOV source code
+ unarchive:
+ src: "{{ sriov_dest }}/{{ sriov_package }}.tar.gz"
+ dest: "{{ base_dest }}/sriov"
+ when: SRIOV_ENABLED
+ - name: Build the SRIOV target
+ make:
+ chdir: "{{ base_dest }}/sriov/{{ sriov_package }}/src"
+ when: SRIOV_ENABLED
+ - name: Create SRIOV driver folder in the target destination
file:
state: directory
path: "{{ item }}"
with_items:
- sriov_driver
- when: _SRIOV
- - copy:
- src: "{{ sriov_dest }}/{{ sriov_package }}/src/iavf.ko"
+ when: SRIOV_ENABLED
+ - name: Copy SRIOV module to target destination
+ copy:
+ src: "{{ base_dest }}/sriov/{{ sriov_package }}/src/iavf.ko"
dest: sriov_driver
- remote_src: no
- when: _SRIOV
- - copy:
+ remote_src: yes
+ when: SRIOV_ENABLED
+ - name: Copy SRIOV install script to target
+ copy:
src: "{{ playbook_dir }}/install_iavf_drivers.sh"
dest: sriov_driver/install.sh
- remote_src: no
- when: _SRIOV
- - name: Changing perm of "install.sh", adding "+x"
- file: dest=sriov_driver/install.sh mode=a+x
- when: _SRIOV
- - name: Run a script with arguments
+ mode: 0755
+ when: SRIOV_ENABLED
+ - name: Run the SRIOV driver install script
shell: ./install.sh
args:
chdir: "sriov_driver"
- when: _SRIOV
+ when: SRIOV_ENABLED
+ - name: Clean the SRIOV folder
+ file:
+ path: "{{ base_dest }}/sriov"
+ state: absent
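Note: the play installs the freshly built iavf module via install_iavf_drivers.sh. To confirm a node picked up the new driver (a sketch; assumes the build succeeded and the module was loaded):

    # Check which iavf binary the kernel resolved and that it is loaded.
    modinfo iavf | grep -E '^(filename|version):'
    lsmod | grep -w iavf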
diff --git a/kud/hosting_providers/containerized/installer.sh b/kud/hosting_providers/containerized/installer.sh
index ae16b1dd..b2ec52af 100755
--- a/kud/hosting_providers/containerized/installer.sh
+++ b/kud/hosting_providers/containerized/installer.sh
@@ -36,7 +36,6 @@ function _install_ansible {
pip install --no-cache-dir ansible==$version
}
-# install_k8s() - Install Kubernetes using kubespray tool
function install_kubespray {
echo "Deploying kubernetes"
version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \
@@ -50,7 +49,6 @@ function install_kubespray {
_install_ansible
wget https://github.com/kubernetes-incubator/kubespray/archive/$tarball
tar -C $dest_folder -xzf $tarball
- mv $dest_folder/kubespray-$version/ansible.cfg /etc/ansible/ansible.cfg
chown -R root:root $dest_folder/kubespray-$version
mkdir -p ${local_release_dir}/containers
rm $tarball
@@ -79,11 +77,14 @@ function install_kubespray {
fi
}
+# install_k8s() - Install Kubernetes using kubespray tool
function install_k8s {
- version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \
- awk -F ': ' '{print $2}')
local cluster_name=$1
ansible-playbook $verbose -i \
+ $kud_inventory $kud_playbooks/preconfigure-kubespray.yml \
+ --become --become-user=root | \
+ tee $cluster_log/setup-kubernetes.log
+ ansible-playbook $verbose -i \
$kud_inventory $dest_folder/kubespray-$version/cluster.yml \
-e cluster_name=$cluster_name --become --become-user=root | \
tee $cluster_log/setup-kubernetes.log
@@ -117,36 +118,43 @@ function install_addons {
$kud_infra_folder/galaxy-requirements.yml --ignore-errors
ansible-playbook $verbose -i \
- $kud_inventory $kud_playbooks/configure-kud.yml | \
+ $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml | \
tee $cluster_log/setup-kud.log
- for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov cmk $plugins_name}; do
+ # The order of KUD_ADDONS is important: some plugins (sriov, qat)
+ # require nfd to be enabled.
+ for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk $plugins_name}; do
echo "Deploying $addon using configure-$addon.yml playbook.."
ansible-playbook $verbose -i \
- $kud_inventory $kud_playbooks/configure-${addon}.yml | \
+ $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-${addon}.yml | \
tee $cluster_log/setup-${addon}.log
done
echo "Run the test cases if testing_enabled is set to true."
if [[ "${testing_enabled}" == "true" ]]; then
- for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov cmk $plugins_name}; do
+ failed_kud_tests=""
+ for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk $plugins_name}; do
pushd $kud_tests
- bash ${addon}.sh
+ bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
case $addon in
"onap4k8s" )
echo "Test the onap4k8s plugin installation"
for functional_test in plugin_edgex plugin_fw plugin_eaa; do
- bash ${functional_test}.sh --external
+ bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
done
;;
"emco" )
echo "Test the emco plugin installation"
for functional_test in plugin_fw_v2; do
- bash ${functional_test}.sh --external
+ bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
done
;;
esac
popd
done
+ if [[ ! -z "$failed_kud_tests" ]]; then
+ echo "Test cases failed:${failed_kud_tests}"
+ return 1
+ fi
fi
echo "Add-ons deployment complete..."
}
@@ -199,6 +207,9 @@ function install_pkg {
}
function install_cluster {
+ version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \
+ awk -F ': ' '{print $2}')
+ export ANSIBLE_CONFIG=$dest_folder/kubespray-$version/ansible.cfg
install_k8s $1
if [ ${2:+1} ]; then
echo "install default addons and $2"
diff --git a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
index 5560dd97..18a55035 100644
--- a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
@@ -49,14 +49,9 @@ kubectl_localhost: true
local_volumes_enabled: true
local_volume_provisioner_enabled: true
-## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.16.9
-
# Helm deployment
helm_enabled: true
-docker_version: 'latest'
-
# Kube-proxy proxyMode configuration.
# NOTE: Ipvs is based on netfilter hook function, but uses hash table as the underlying data structure and
# works in the kernel space
@@ -84,3 +79,37 @@ kube_pods_subnet: 10.244.64.0/18
# disable localdns cache
enable_nodelocaldns: false
+
+# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
+podsecuritypolicy_enabled: true
+# The restricted spec is identical to the kubespray podsecuritypolicy_privileged_spec, with the replacement of
+# allowedCapabilities:
+# - '*'
+# by
+# requiredDropCapabilities:
+# - NET_RAW
+podsecuritypolicy_restricted_spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ volumes:
+ - '*'
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+ max: 65535
+ hostIPC: true
+ hostPID: true
+ requiredDropCapabilities:
+ - NET_RAW
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'RunAsAny'
+ fsGroup:
+ rule: 'RunAsAny'
+ readOnlyRootFilesystem: false
+ # This will fail if allowed-unsafe-sysctls is not set accordingly in kubelet flags
+ allowedUnsafeSysctls:
+ - '*'
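Note: with podsecuritypolicy_enabled, kubespray creates privileged and restricted policies and the psp:privileged ClusterRole referenced by the new rolebinding tasks in the playbooks above; workloads that need host access must be bound to psp:privileged per namespace. A quick inspection on a cluster deployed with these settings:

    # List the policies and one of the per-namespace bindings created above.
    kubectl get psp
    kubectl -n emco get rolebinding psp:default:privileged -o yaml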
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
index 71e4d8b7..43638b4f 100755
--- a/kud/hosting_providers/vagrant/installer.sh
+++ b/kud/hosting_providers/vagrant/installer.sh
@@ -102,6 +102,7 @@ function _set_environment_file {
echo "export OVN_CENTRAL_ADDRESS=$(get_ovn_central_address)" | sudo tee --append /etc/environment
echo "export KUBE_CONFIG_DIR=/opt/kubeconfig" | sudo tee --append /etc/environment
echo "export CSAR_DIR=/opt/csar" | sudo tee --append /etc/environment
+ echo "export ANSIBLE_CONFIG=${ANSIBLE_CONFIG}" | sudo tee --append /etc/environment
}
# install_k8s() - Install Kubernetes using kubespray tool
@@ -117,7 +118,6 @@ function install_k8s {
_install_ansible
wget https://github.com/kubernetes-incubator/kubespray/archive/$tarball
sudo tar -C $dest_folder -xzf $tarball
- sudo mv $dest_folder/kubespray-$version/ansible.cfg /etc/ansible/ansible.cfg
sudo chown -R $USER $dest_folder/kubespray-$version
sudo mkdir -p ${local_release_dir}/containers
rm $tarball
@@ -139,6 +139,8 @@ function install_k8s {
if [[ -n "${https_proxy:-}" ]]; then
echo "https_proxy: \"$https_proxy\"" | tee --append $kud_inventory_folder/group_vars/all.yml
fi
+ export ANSIBLE_CONFIG=$dest_folder/kubespray-$version/ansible.cfg
+ ansible-playbook $verbose -i $kud_inventory $kud_playbooks/preconfigure-kubespray.yml --become --become-user=root | sudo tee $log_folder/setup-kubernetes.log
ansible-playbook $verbose -i $kud_inventory $dest_folder/kubespray-$version/cluster.yml --become --become-user=root | sudo tee $log_folder/setup-kubernetes.log
# Configure environment
@@ -155,17 +157,24 @@ function install_addons {
_install_ansible
sudo ansible-galaxy install $verbose -r $kud_infra_folder/galaxy-requirements.yml --ignore-errors
ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml | sudo tee $log_folder/setup-kud.log
+ # The order of KUD_ADDONS is important: some plugins (sriov, qat)
+ # require nfd to be enabled.
for addon in ${KUD_ADDONS:-topology-manager virtlet ovn4nfv nfd sriov qat optane cmk}; do
echo "Deploying $addon using configure-$addon.yml playbook.."
ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-${addon}.yml | sudo tee $log_folder/setup-${addon}.log
done
echo "Run the test cases if testing_enabled is set to true."
if [[ "${testing_enabled}" == "true" ]]; then
+ failed_kud_tests=""
for addon in ${KUD_ADDONS:-multus topology-manager virtlet ovn4nfv nfd sriov qat optane cmk}; do
pushd $kud_tests
- bash ${addon}.sh
+ bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
popd
done
+ if [[ ! -z "$failed_kud_tests" ]]; then
+ echo "Test cases failed:${failed_kud_tests}"
+ return 1
+ fi
fi
echo "Add-ons deployment complete..."
}
diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
index 30fd5c0b..5b06b788 100644
--- a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
@@ -50,9 +50,6 @@ enable_nodelocaldns: false
local_volumes_enabled: true
local_volume_provisioner_enabled: true
-## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.16.9
-
# Helm deployment
helm_enabled: true
@@ -79,3 +76,37 @@ download_localhost: True
kube_service_addresses: 10.244.0.0/18
# Subnet for Pod IPs
kube_pods_subnet: 10.244.64.0/18
+
+# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
+podsecuritypolicy_enabled: true
+# The restricted spec is identical to the kubespray podsecuritypolicy_privileged_spec, with the replacement of
+# allowedCapabilities:
+# - '*'
+# by
+# requiredDropCapabilities:
+# - NET_RAW
+podsecuritypolicy_restricted_spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ volumes:
+ - '*'
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+ max: 65535
+ hostIPC: true
+ hostPID: true
+ requiredDropCapabilities:
+ - NET_RAW
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'RunAsAny'
+ fsGroup:
+ rule: 'RunAsAny'
+ readOnlyRootFilesystem: false
+ # This will fail if allowed-unsafe-sysctls is not set accordingly in kubelet flags
+ allowedUnsafeSysctls:
+ - '*'
diff --git a/kud/tests/emco.sh b/kud/tests/emco.sh
index 8b459b97..2b8eab1e 100755
--- a/kud/tests/emco.sh
+++ b/kud/tests/emco.sh
@@ -424,7 +424,7 @@ function deleteOrchestratorData {
delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}"
print_msg "deleteOrchestratorData done"
}
diff --git a/kud/tests/plugin_fw.sh b/kud/tests/plugin_fw.sh
index a503d661..de9c12e2 100755
--- a/kud/tests/plugin_fw.sh
+++ b/kud/tests/plugin_fw.sh
@@ -124,9 +124,9 @@ print_msg "Retrieving VNF details"
response="$(call_api "${base_url}/instance/${vnf_id}")"
echo "$response"
print_msg "Assert additional label has been assigned to rb instance"
-test "$(jq -r .request.labels.testCaseName <<< "${response}")" == plugin_fw.sh
+test "$(jq -r '.request.labels.testCaseName' <<< "${response}")" == plugin_fw.sh
print_msg "Assert ReleaseName has been correctly overriden"
-test "$(jq -r .request.release-name <<< "${response}")" == "${release_name}"
+test "$(jq -r '.request."release-name"' <<< "${response}")" == "${release_name}"
#Teardown
print_msg "Deleting VNF Instance"
diff --git a/kud/tests/qat.sh b/kud/tests/qat.sh
index 2f8d212a..8365f700 100755
--- a/kud/tests/qat.sh
+++ b/kud/tests/qat.sh
@@ -10,16 +10,13 @@
set -o pipefail
-qat_device=$( for i in 0434 0435 37c8 6f54 19e2; \
- do lspci -d 8086:$i -m; done |\
- grep -i "Quick*" | head -n 1 | cut -d " " -f 5 )
-#Checking if the QAT device is on the node
-if [ -z "$qat_device" ]; then
- echo "False. This test case cannot run. Qat device unavailable."
+qat_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.capacity."qat.intel.com/cy2_dc2">="1") | .metadata.name')
+if [ -z "$qat_capable_nodes" ]; then
+ echo "This test case cannot run. QAT device unavailable."
QAT_ENABLED=False
exit 0
else
- echo "True. Can run QAT on this device."
+ echo "Can run QAT on this cluster."
QAT_ENABLED=True
fi
@@ -78,9 +75,7 @@ kubectl create -f $HOME/$pod_name.yaml --validate=false
allocated_node_resource=$(kubectl describe node | grep "qat.intel.com" | tail -n1 |awk '{print $(NF)}')
echo "The allocated resource of the node is: " $allocated_node_resource
-adf_ctl restart
-systemctl restart qat_service
-kubectl exec -it pod-case-01 -- openssl engine -c -t qat
+kubectl exec pod-case-01 -- openssl engine -c -t qat
kubectl delete pod $pod_name --now
echo "Test complete."
diff --git a/kud/tests/sriov.sh b/kud/tests/sriov.sh
index 2dea576e..e617ea62 100755
--- a/kud/tests/sriov.sh
+++ b/kud/tests/sriov.sh
@@ -10,17 +10,12 @@
set -o pipefail
-ethernet_adpator_version=$( lspci | grep "Ethernet Controller XL710" | head -n 1 | cut -d " " -f 8 )
-if [ -z "$ethernet_adpator_version" ]; then
- echo " Ethernet adapator version is not set. SRIOV test case cannot run on this machine"
+sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.capacity."intel.com/intel_sriov_700">="2") | .metadata.name')
+if [ -z "$sriov_capable_nodes" ]; then
+ echo "SRIOV test case cannot run on the cluster."
exit 0
-fi
-#checking for the right hardware version of NIC on the machine
-if [ $ethernet_adpator_version == "XL710" ]; then
- echo "NIC card specs match. SRIOV option avaiable for this version."
else
- echo -e "Failed. The version supplied does not match.\nTest cannot be executed."
- exit 0
+ echo "SRIOV option avaiable in the cluster."
fi
pod_name=pod-case-01
diff --git a/kud/tests/topology-manager.sh b/kud/tests/topology-manager.sh
index bbffd4d7..7d434386 100755
--- a/kud/tests/topology-manager.sh
+++ b/kud/tests/topology-manager.sh
@@ -15,9 +15,8 @@ set -o pipefail
source _common.sh
source _functions.sh
-ethernet_adpator_version=$( lspci | grep "Ethernet Controller XL710" | head -n 1 | cut -d " " -f 8 )
-if [ -z "$ethernet_adpator_version" ]; then
- echo " Ethernet adapator version is not set. Topology manager test case cannot run on this machine"
+if [ -z "$( lspci | grep "Ethernet Controller XL710" | head -n 1 | cut -d " " -f 8 )" ]; then
+ echo "Ethernet adaptor version is not set. Topology manager test case cannot run on this machine"
exit 0
else
echo "NIC card specs match. Topology manager option avaiable for this version."