Diffstat (limited to 'kud/deployment_infra')
21 files changed, 294 insertions, 255 deletions
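The bulk of this change migrates the KUD playbooks from Helm v2 to Helm v3 (helm_client_version is bumped to 3.2.4 in kud-vars.yml): the Tiller-era "helm init -c" task and the "make repo"/"make repo-stop" targets are dropped, and the install/delete commands move to the v3 syntax. A minimal before/after sketch of the command forms, copied from the EMCO playbook changes below; note that with Helm v3 the release namespace must already exist, which is why the playbooks gain explicit "kubectl create namespace" tasks:

    # Helm v2 (removed)
    helm init -c
    helm install dist/packages/emco-0.1.0.tgz --name emco --namespace emco
    helm delete --purge emco

    # Helm v3 (added)
    kubectl create namespace emco
    helm install --namespace emco emco dist/packages/emco-0.1.0.tgz
    helm uninstall --namespace emco emco
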
diff --git a/kud/deployment_infra/galaxy-requirements.yml b/kud/deployment_infra/galaxy-requirements.yml index 3191dc19..9747dc99 100644 --- a/kud/deployment_infra/galaxy-requirements.yml +++ b/kud/deployment_infra/galaxy-requirements.yml @@ -10,6 +10,6 @@ - src: andrewrothstein.go version: v2.1.15 - src: andrewrothstein.kubernetes-helm - version: v1.2.17 + version: v1.3.16 - src: geerlingguy.docker version: 2.5.2 diff --git a/kud/deployment_infra/images/multus-daemonset.yml b/kud/deployment_infra/images/multus-daemonset.yml index d6d8d533..09759360 100644 --- a/kud/deployment_infra/images/multus-daemonset.yml +++ b/kud/deployment_infra/images/multus-daemonset.yml @@ -1,11 +1,10 @@ --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: network-attachment-definitions.k8s.cni.cncf.io spec: group: k8s.cni.cncf.io - version: v1 scope: Namespaced names: plural: network-attachment-definitions @@ -13,16 +12,27 @@ spec: kind: NetworkAttachmentDefinition shortNames: - net-attach-def - validation: - openAPIV3Schema: - properties: - spec: + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing + Working Group to express the intent for attaching pods to one or more logical or physical + networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec' + type: object properties: - config: - type: string + spec: + description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment' + type: object + properties: + config: + description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration' + type: string --- kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: multus rules: @@ -39,9 +49,18 @@ rules: verbs: - get - update + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update --- kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: multus roleRef: @@ -68,11 +87,18 @@ metadata: tier: node app: multus data: + # NOTE: If you'd prefer to manually apply a configuration file, you may create one here. + # In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod + # change the "args" line below from + # - "--multus-conf-file=auto" + # to: + # "--multus-conf-file=/tmp/multus-conf/70-multus.conf" + # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the + # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet. 
cni-conf.json: | { "name": "multus-cni-network", "type": "multus", - "cniVersion": "0.3.1", "capabilities": { "portMappings": true }, @@ -109,11 +135,11 @@ metadata: labels: tier: node app: multus + name: multus spec: selector: matchLabels: - tier: node - app: multus + name: multus updateStrategy: type: RollingUpdate template: @@ -121,20 +147,22 @@ spec: labels: tier: node app: multus + name: multus spec: hostNetwork: true nodeSelector: - beta.kubernetes.io/arch: amd64 + kubernetes.io/arch: amd64 tolerations: - operator: Exists effect: NoSchedule serviceAccountName: multus containers: - name: kube-multus - image: nfvpe/multus:v3.3-tp + image: integratedcloudnative/multus:v3.4.1-tp command: ["/entrypoint.sh"] args: - - "--multus-conf-file=/tmp/multus-conf/00-multus.conf" + - "--multus-conf-file=auto" + - "--cni-version=0.3.1" resources: requests: cpu: "100m" @@ -163,4 +191,4 @@ spec: name: multus-cni-config items: - key: cni-conf.json - path: 00-multus.conf + path: 70-multus.conf diff --git a/kud/deployment_infra/images/nfd-master.yaml b/kud/deployment_infra/images/nfd-master.yaml index 846bb753..4e07c2ed 100644 --- a/kud/deployment_infra/images/nfd-master.yaml +++ b/kud/deployment_infra/images/nfd-master.yaml @@ -37,6 +37,23 @@ subjects: name: nfd-master namespace: node-feature-discovery --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: psp:default:privileged + namespace: node-feature-discovery +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:privileged +subjects: +- kind: ServiceAccount + name: default + namespace: node-feature-discovery +- kind: ServiceAccount + name: nfd-master + namespace: node-feature-discovery +--- apiVersion: apps/v1 kind: DaemonSet metadata: diff --git a/kud/deployment_infra/images/sriov-cni.yml b/kud/deployment_infra/images/sriov-cni.yml index 7503b872..570b00ee 100644 --- a/kud/deployment_infra/images/sriov-cni.yml +++ b/kud/deployment_infra/images/sriov-cni.yml @@ -21,6 +21,15 @@ spec: tier: node app: sriov-cni spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: feature.node.kubernetes.io/network-sriov.capable + operator: In + values: + - "true" hostNetwork: true nodeSelector: beta.kubernetes.io/arch: amd64 diff --git a/kud/deployment_infra/images/sriov-daemonset.yml b/kud/deployment_infra/images/sriov-daemonset.yml index e392028d..41b1cbaa 100644 --- a/kud/deployment_infra/images/sriov-daemonset.yml +++ b/kud/deployment_infra/images/sriov-daemonset.yml @@ -46,6 +46,15 @@ spec: tier: node app: sriovdp spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: feature.node.kubernetes.io/network-sriov.capable + operator: In + values: + - "true" hostNetwork: true hostPID: true nodeSelector: diff --git a/kud/deployment_infra/playbooks/configure-emco-reset.yml b/kud/deployment_infra/playbooks/configure-emco-reset.yml index a4560cc2..7cad36e4 100644 --- a/kud/deployment_infra/playbooks/configure-emco-reset.yml +++ b/kud/deployment_infra/playbooks/configure-emco-reset.yml @@ -15,7 +15,7 @@ file: kud-vars.yml - name: Change the emco directory and run helm delete - command: /usr/local/bin/helm delete --purge emco + command: /usr/local/bin/helm uninstall --namespace emco emco register: helm_delete args: chdir: /opt/multicloud/deployments/helm/v2/emco @@ -41,15 +41,6 @@ - debug: var: make_clean.stdout_lines - - name: Change the emco directory and make 
repo-stop - command: /usr/bin/make repo-stop - register: make_repo_stop - args: - chdir: /opt/multicloud/deployments/helm/v2/emco - - - debug: - var: make_repo_stop.stdout_lines - - name: clean multicloud-k8s path file: state: absent diff --git a/kud/deployment_infra/playbooks/configure-emco.yml b/kud/deployment_infra/playbooks/configure-emco.yml index 255ce6a4..96b4a23d 100644 --- a/kud/deployment_infra/playbooks/configure-emco.yml +++ b/kud/deployment_infra/playbooks/configure-emco.yml @@ -27,15 +27,6 @@ yum: name=make state=present update_cache=yes when: ansible_distribution == "CentOS" - - name: Change the emco directory and run the command make repo - command: /usr/bin/make repo - register: make_repo - args: - chdir: /opt/multicloud/deployments/helm/v2/emco - - - debug: - var: make_repo.stdout_lines - - name: Change the emco directory and run the command make all command: /usr/bin/make all register: make_all @@ -45,8 +36,20 @@ - debug: var: make_all.stdout_lines + - name: Create emco namespace + shell: "/usr/local/bin/kubectl create namespace emco" + ignore_errors: True + + - name: Create pod security policy role bindings + shell: "/usr/local/bin/kubectl -n emco create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=emco:default --serviceaccount=emco:emco-fluentd" + ignore_errors: True + + - name: Get cluster name + shell: "kubectl -n kube-system get configmap/kubeadm-config -o yaml | grep clusterName: | awk '{print $2}'" + register: cluster_name + - name: Change the emco directory and run the command helm install - command: /usr/local/bin/helm install dist/packages/emco-0.1.0.tgz --name emco --namespace emco + command: /usr/local/bin/helm install --namespace emco --set emco-tools.fluentd.clusterDomain={{ cluster_name.stdout }} emco dist/packages/emco-0.1.0.tgz register: helm_install args: chdir: /opt/multicloud/deployments/helm/v2/emco diff --git a/kud/deployment_infra/playbooks/configure-kud.yml b/kud/deployment_infra/playbooks/configure-kud.yml index 6ac0477d..0e32e69d 100644 --- a/kud/deployment_infra/playbooks/configure-kud.yml +++ b/kud/deployment_infra/playbooks/configure-kud.yml @@ -23,11 +23,6 @@ when: helm_client.rc != 0 vars: kubernetes_helm_ver: "v{{ helm_client_version }}" - tasks: - - name: Initialize helm client - command: helm init -c - args: - creates: ~/.helm - hosts: kube-node become: yes diff --git a/kud/deployment_infra/playbooks/configure-onap4k8s-reset.yml b/kud/deployment_infra/playbooks/configure-onap4k8s-reset.yml index d6f2f6bc..ddfedbb4 100644 --- a/kud/deployment_infra/playbooks/configure-onap4k8s-reset.yml +++ b/kud/deployment_infra/playbooks/configure-onap4k8s-reset.yml @@ -15,7 +15,7 @@ file: kud-vars.yml - name: Change the onap4k8s directory and run helm delete - command: /usr/local/bin/helm delete --purge multicloud-onap8ks + command: /usr/local/bin/helm uninstall --namespace onap4k8s-ns multicloud-onap8ks register: helm_delete args: chdir: /opt/multicloud/deployments/helm/onap4k8s @@ -41,15 +41,6 @@ - debug: var: make_clean.stdout_lines - - name: Change the onap4k8s directory and make repo-stop - command: /usr/bin/make repo-stop - register: make_repo_stop - args: - chdir: /opt/multicloud/deployments/helm/onap4k8s - - - debug: - var: make_repo_stop.stdout_lines - - name: clean multicloud-k8s path file: state: absent diff --git a/kud/deployment_infra/playbooks/configure-onap4k8s.yml b/kud/deployment_infra/playbooks/configure-onap4k8s.yml index 11729171..48052225 100644 --- 
a/kud/deployment_infra/playbooks/configure-onap4k8s.yml +++ b/kud/deployment_infra/playbooks/configure-onap4k8s.yml @@ -27,15 +27,6 @@ yum: name=make state=present update_cache=yes when: ansible_distribution == "CentOS" - - name: Change the onap4k8s directory and run the command make repo - command: /usr/bin/make repo - register: make_repo - args: - chdir: /opt/multicloud/deployments/helm/onap4k8s - - - debug: - var: make_repo.stdout_lines - - name: Change the onap4k8s directory and run the command make all command: /usr/bin/make all register: make_all @@ -45,8 +36,16 @@ - debug: var: make_all.stdout_lines + - name: Create onap4k8s-ns namespace + shell: "/usr/local/bin/kubectl create namespace onap4k8s-ns" + ignore_errors: True + + - name: Create pod security policy role bindings + shell: "/usr/local/bin/kubectl -n onap4k8s-ns create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=onap4k8s-ns:default" + ignore_errors: True + - name: Change the onap4k8s directory and run the command helm install - command: /usr/local/bin/helm install dist/packages/multicloud-k8s-5.0.0.tgz --name multicloud-onap8ks --namespace onap4k8s-ns --set service.type=NodePort + command: /usr/local/bin/helm install --namespace onap4k8s-ns --set service.type=NodePort multicloud-onap8ks dist/packages/multicloud-k8s-5.0.0.tgz register: helm_install args: chdir: /opt/multicloud/deployments/helm/onap4k8s diff --git a/kud/deployment_infra/playbooks/configure-optane.yml b/kud/deployment_infra/playbooks/configure-optane.yml index 8e000aa4..01189808 100644 --- a/kud/deployment_infra/playbooks/configure-optane.yml +++ b/kud/deployment_infra/playbooks/configure-optane.yml @@ -12,4 +12,4 @@ - hosts: localhost tasks: - name: Apply Optane PMEM CSI Daemonset - command: "{{ base_dest }}/optane/deploy_optane.sh" + command: "{{ optane_dest }}/deploy_optane.sh" diff --git a/kud/deployment_infra/playbooks/configure-ovn4nfv.yml b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml index b335f8c8..7043bf53 100644 --- a/kud/deployment_infra/playbooks/configure-ovn4nfv.yml +++ b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml @@ -40,6 +40,10 @@ shell: "/usr/local/bin/kubectl create namespace operator" ignore_errors: True + - name: create pod security policy role bindings + shell: "/usr/local/bin/kubectl -n operator create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=operator:default --serviceaccount=operator:k8s-nfn-sa" + ignore_errors: True + - name: apply nfn operator label command: "/usr/local/bin/kubectl label node {{ item }} nfnType=operator --overwrite" with_inventory_hostnames: ovn-central diff --git a/kud/deployment_infra/playbooks/configure-qat.yml b/kud/deployment_infra/playbooks/configure-qat.yml index 1225b3d4..39f52403 100644 --- a/kud/deployment_infra/playbooks/configure-qat.yml +++ b/kud/deployment_infra/playbooks/configure-qat.yml @@ -11,5 +11,5 @@ - import_playbook: preconfigure-qat.yml - hosts: localhost tasks: - - name: Apply QAT plugin previleges Daemonset + - name: Apply QAT plugin privileges Daemonset command: "/usr/local/bin/kubectl apply -f {{ playbook_dir }}/../images/qat_plugin_privileges.yaml" diff --git a/kud/deployment_infra/playbooks/configure-sriov.yml b/kud/deployment_infra/playbooks/configure-sriov.yml index 45f276c6..c0b7c9e0 100644 --- a/kud/deployment_infra/playbooks/configure-sriov.yml +++ b/kud/deployment_infra/playbooks/configure-sriov.yml @@ -9,21 +9,19 @@ 
############################################################################## - import_playbook: preconfigure-sriov.yml - - hosts: localhost - become: yes + vars: + sriov_enabled: "{{ groups['kube-node'] | map('extract', hostvars, ['SRIOV_ENABLED']) | select() | list | length > 0 }}" tasks: - - debug: - var: SRIOV_NODE - name: Apply Multus shell: "/usr/local/bin/kubectl apply -f {{ playbook_dir }}/../images/multus-daemonset.yml" - when: SRIOV_NODE + when: sriov_enabled - name: Apply SRIOV CNI shell: "/usr/local/bin/kubectl apply -f {{ playbook_dir }}/../images/sriov-cni.yml" - when: SRIOV_NODE + when: sriov_enabled - name: Apply SRIOV DaemonSet shell: "/usr/local/bin/kubectl apply -f {{ playbook_dir }}/../images/sriov-daemonset.yml" - when: SRIOV_NODE + when: sriov_enabled - name: Apply SRIOV Network Attachment definition shell: "/usr/local/bin/kubectl apply -f {{ playbook_dir }}/sriov-nad.yml" - when: SRIOV_NODE + when: sriov_enabled diff --git a/kud/deployment_infra/playbooks/configure-virtlet.yml b/kud/deployment_infra/playbooks/configure-virtlet.yml index d2461f73..6ba840ce 100644 --- a/kud/deployment_infra/playbooks/configure-virtlet.yml +++ b/kud/deployment_infra/playbooks/configure-virtlet.yml @@ -40,7 +40,7 @@ - regexp: 'centos/(\d+)-(\d+)' url: 'https://cloud.centos.org/centos/$1/images/CentOS-$1-x86_64-GenericCloud-$2.qcow2' - name: fedora - url: https://dl.fedoraproject.org/pub/fedora/linux/releases/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2 + url: https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2 {% if lookup('env','http_proxy') != "" %} transports: "": diff --git a/kud/deployment_infra/playbooks/install_qat.sh b/kud/deployment_infra/playbooks/install_qat.sh index 57adb923..4a7fdef7 100644 --- a/kud/deployment_infra/playbooks/install_qat.sh +++ b/kud/deployment_infra/playbooks/install_qat.sh @@ -1,7 +1,7 @@ #!/bin/bash # Precondition: -# QAT device installed, such as lspci | grep 37c8 +# QAT device installed, such as lspci -n | grep 37c8 # Enable grub with "intel_iommu=on iommu=pt" ROOT= diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml index 30e54f03..51607020 100644 --- a/kud/deployment_infra/playbooks/kud-vars.yml +++ b/kud/deployment_infra/playbooks/kud-vars.yml @@ -57,12 +57,14 @@ cmk_untaint_required: true #cmk_exclusive_mode: packed # choose between: packed, spread, default: packed go_version: '1.12.5' -kubespray_version: 2.12.6 -helm_client_version: 2.13.1 +kubespray_version: 2.14.1 +# This matches the helm_version from kubespray defaults +helm_client_version: 3.2.4 # kud playbooks not compatible with 2.8.0 - see MULTICLOUD-634 ansible_version: 2.9.7 -sriov_dest: "{{ base_dest }}/sriov" +sriov_pkgs: make,gcc +sriov_dest: "{{ base_dest }}/sriov_driver" sriov_driver_source_type: "tarball" sriov_driver_version: 3.7.34 sriov_driver_url: "https://downloadmirror.intel.com/28943/eng/iavf-{{ sriov_driver_version }}.tar.gz" diff --git a/kud/deployment_infra/playbooks/preconfigure-kubespray.yml b/kud/deployment_infra/playbooks/preconfigure-kubespray.yml new file mode 100644 index 00000000..78e7eda6 --- /dev/null +++ b/kud/deployment_infra/playbooks/preconfigure-kubespray.yml @@ -0,0 +1,19 @@ +--- +# The mitogen module used in kubespray requires python2 on the nodes. +# On some distributions (i.e. Ubuntu 18.04), the default version of +# python is python3. 
+# +# When python2 is not present a failure message similar to "bash: +# /usr/bin/python: No such file or directory" will be reported. +# +# Note the use of "strategy: linear" below to temporarily bypass +# mitogen. +# +- name: Install python2 + hosts: k8s-cluster + strategy: linear + tasks: + - name: Install python2 + package: + name: python + state: present diff --git a/kud/deployment_infra/playbooks/preconfigure-optane.yml b/kud/deployment_infra/playbooks/preconfigure-optane.yml index 64622895..135371ea 100644 --- a/kud/deployment_infra/playbooks/preconfigure-optane.yml +++ b/kud/deployment_infra/playbooks/preconfigure-optane.yml @@ -22,6 +22,14 @@ state: directory path: "{{ optane_dest }}" ignore_errors: yes + - copy: + src: "{{ playbook_dir }}/deploy_optane.sh" + dest: "{{ optane_dest }}" + - name: Changing perm of "sh", adding "+x" + shell: "chmod +x deploy_optane.sh" + args: + chdir: "{{ optane_dest }}" + warn: False - hosts: kube-node become: yes @@ -61,14 +69,6 @@ chdir: "optane" warn: False - copy: - src: "{{ playbook_dir }}/deploy_optane.sh" - dest: optane - - name: Changing perm of "sh", adding "+x" - shell: "chmod +x deploy_optane.sh" - args: - chdir: "optane" - warn: False - - copy: src: "{{ playbook_dir }}/../images/pmem-csi-lvm.yaml" dest: optane - copy: diff --git a/kud/deployment_infra/playbooks/preconfigure-qat.yml b/kud/deployment_infra/playbooks/preconfigure-qat.yml index f5d797f1..ef8446f8 100644 --- a/kud/deployment_infra/playbooks/preconfigure-qat.yml +++ b/kud/deployment_infra/playbooks/preconfigure-qat.yml @@ -19,12 +19,10 @@ file: state: directory path: "{{ qat_dest }}" - - name: Fetching QAT driver - block: - - name: Download QAT driver tarball - get_url: - url: "{{ qat_driver_url }}" - dest: "{{ qat_dest }}/{{ qat_package }}.tar.gz" + - name: Download QAT driver tarball + get_url: + url: "{{ qat_driver_url }}" + dest: "{{ qat_dest }}/{{ qat_package }}.tar.gz" - hosts: kube-node become: yes @@ -33,21 +31,13 @@ include_vars: file: kud-vars.yml tasks: - - name: Create a destination for driver folder in the target's /tmp - file: - state: directory - path: "{{ item }}" - with_items: - - "{{ base_dest }}/quick-assist/{{ qat_package }}" - - name: Create QAT dest folder + - name: Create destination folder for QAT check script file: state: directory - path: "qat" - - name: Register QAT env variable - shell: "echo {{ QAT_ENABLED | default(False) }}" + path: "{{ base_dest }}/qat" - name: Create QAT check script copy: - dest: "qat/qat.sh" + dest: "{{ base_dest }}/qat/qat.sh" content: | #!/bin/bash qat_device=$( for i in 0434 0435 37c8 6f54 19e2; \ @@ -59,15 +49,11 @@ else echo "True" fi - - name: Changing perm of "sh", adding "+x" - shell: "chmod +x qat.sh" - args: - chdir: "qat" - warn: False - - name: Run the script and re-evaluate the variable. - command: "./qat.sh" + mode: 0755 + - name: Run QAT check script and re-evaluate the variable + command: ./qat.sh args: - chdir: "qat" + chdir: "{{ base_dest }}/qat" register: output - debug: var: output.stdout_lines @@ -75,70 +61,68 @@ QAT_ENABLED: "{{ output.stdout }}" - debug: var: output - - name: Clean the script and folder. 
+ - name: Clean QAT check script and folder file: - path: qat + path: "{{ base_dest }}/qat" state: absent - - name: bootstrap | install qat compilation packages - package: - name: "{{ item }}" - state: present - with_items: - - pciutils - - build-essential - - libudev-dev - - pkg-config - when: QAT_ENABLED - - copy: - src: "{{ qat_dest }}/{{ qat_package }}.tar.gz" - dest: "{{ base_dest }}/quick-assist" - remote_src: no - when: QAT_ENABLED - - name: Extract QAT source code - unarchive: - src: "{{ qat_dest }}/{{ qat_package }}.tar.gz" - dest: "{{ base_dest }}/quick-assist/{{ qat_package }}" - when: QAT_ENABLED - - name: Configure the target - command: ./configure --enable-icp-sriov=host - args: - chdir: "{{ base_dest }}/quick-assist/{{ qat_package }}" - when: QAT_ENABLED - - name: build qat driver - make: - chdir: "{{ base_dest }}/quick-assist/{{ qat_package }}" - target: "{{ item }}" - loop: - - clean - - uninstall - - install - when: QAT_ENABLED - - name: Create QAT driver folder in the target destination - file: - state: directory - path: "{{ item }}" - with_items: - - qat_driver_dest - when: QAT_ENABLED - - name: Copy QAT build directory qat target destination - command: "cp -r {{ base_dest }}/quick-assist/{{ qat_package }}/build/ /root/qat_driver_dest/" - when: QAT_ENABLED - - name: Copy QAT driver install script to target folder - command: "cp {{ playbook_dir }}/install_qat.sh /root/qat_driver_dest/build/install.sh" - when: QAT_ENABLED - - name: Copy QAT to target folder - command: "cp /etc/default/qat /root/qat_driver_dest/build" - when: QAT_ENABLED - - name: Changing perm of "install.sh", adding "+x" - file: dest=~/qat_driver_dest/build/install.sh mode=a+x - when: QAT_ENABLED - - name: Run a script with arguments - command: ./install.sh chdir=/root/qat_driver_dest/build - when: QAT_ENABLED - - name: get qat devices - shell: /usr/local/bin/adf_ctl status | grep up | awk '{print $4 substr($1, 4)}' | tr -d ',' - register: qat_devices - when: QAT_ENABLED - - name: Updating the qat device SSL values to avoid duplication - command: "./substitute.sh chdir={{ playbook_dir }}" + - name: Install QAT driver + block: + - name: Install QAT compilation packages + package: + name: "{{ item }}" + state: present + with_items: + - pciutils + - build-essential + - libudev-dev + - pkg-config + - name: Create destination folder for QAT source code + file: + state: directory + path: "{{ qat_dest }}/{{ qat_package }}" + - name: Extract QAT source code + unarchive: + src: "{{ qat_dest }}/{{ qat_package }}.tar.gz" + dest: "{{ qat_dest }}/{{ qat_package }}" + - name: Configure the target + command: ./configure --enable-icp-sriov=host + args: + chdir: "{{ qat_dest }}/{{ qat_package }}" + - name: Build QAT driver + make: + chdir: "{{ qat_dest }}/{{ qat_package }}" + target: "{{ item }}" + loop: + - clean + - uninstall + - install + - name: Copy QAT driver install script to target folder + copy: + src: "install_qat.sh" + dest: "{{ qat_dest }}/{{ qat_package }}/build" + mode: 0755 + - name: Copy /etc/default/qat to target folder + copy: + src: "/etc/default/qat" + dest: "{{ qat_dest }}/{{ qat_package }}/build" + remote_src: yes + - name: Run a script with arguments + command: ./install_qat.sh + args: + chdir: "{{ qat_dest }}/{{ qat_package }}/build" + - name: Copy QAT substitue script to target folder + copy: + src: "substitute.sh" + dest: "{{ qat_dest }}/{{ qat_package }}/build" + mode: 0755 + - name: Update the QAT device SSL values to avoid duplication + command: ./substitute.sh + args: + chdir: "{{ qat_dest 
}}/{{ qat_package }}/build" + - name: Restart acceleration driver framework + command: adf_ctl restart + - name: Restart QAT service + service: + name: qat_service + state: restarted when: QAT_ENABLED diff --git a/kud/deployment_infra/playbooks/preconfigure-sriov.yml b/kud/deployment_infra/playbooks/preconfigure-sriov.yml index 4c633ced..8c95aae8 100644 --- a/kud/deployment_infra/playbooks/preconfigure-sriov.yml +++ b/kud/deployment_infra/playbooks/preconfigure-sriov.yml @@ -8,111 +8,101 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -- hosts: kube-node - become: yes - pre_tasks: - - name: Create SRIOV driver folder in the target destination - file: - state: directory - path: "{{ item }}" - with_items: - - sriov - - copy: - src: "{{ playbook_dir }}/sriov_hardware_check.sh" - dest: sriov - - name: Changing perm of "sh", adding "+x" - shell: "chmod +x sriov_hardware_check.sh" - args: - chdir: "sriov" - warn: False - - name: Register SRIOV - shell: "echo {{ SRIOV | default(False) }}" - - name: Run the script and Re-evaluate the variable - command: sriov/sriov_hardware_check.sh - register: output - - set_fact: - _SRIOV: "{{ output.stdout }}" - - name: Recreate the conf file for every host - file: - path: /tmp/sriov.conf - state: absent - delegate_to: localhost - - lineinfile : > - dest=/tmp/sriov.conf - create=yes - line='{{_SRIOV}}' - delegate_to: localhost - - name: Clean the script and folder. - file: - path: sriov - state: absent - -# Run the following task only if the SRIOV is set to True -# i.e when SRIOV hardware is available - hosts: localhost become: yes pre_tasks: - - name: Read SRIOV value from the conf file. - command: cat /tmp/sriov.conf - register: installer_output - become: yes - - set_fact: - SRIOV_NODE: "{{ installer_output.stdout }}" - name: Load kud variables include_vars: file: kud-vars.yml - when: SRIOV_NODE tasks: - - name: Create sriov folder + - name: Create SRIOV dest folder file: state: directory path: "{{ sriov_dest }}" - ignore_errors: yes - when: SRIOV_NODE - - name: Get SRIOV compatible driver - get_url: "url={{ sriov_driver_url }} dest=/tmp/{{ sriov_package }}.tar.gz" - when: SRIOV_NODE - - name: Extract sriov source code - unarchive: - src: "/tmp/{{ sriov_package }}.tar.gz" - dest: "{{ sriov_dest }}" - when: SRIOV_NODE - - name: Build the default target - make: - chdir: "{{ sriov_dest }}/{{ sriov_package }}/src" - become: yes - when: SRIOV_NODE -# Copy all the driver and install script into target node + - name: Fetching SRIOV driver + block: + - name: Download SRIOV driver tarball + get_url: + url: "{{ sriov_driver_url }}" + dest: "{{ sriov_dest }}/{{ sriov_package }}.tar.gz" + - hosts: kube-node become: yes pre_tasks: - name: Load kud variables include_vars: file: kud-vars.yml - when: _SRIOV tasks: - - name: create SRIOV driver folder in the target destination + - name: Create a destination for driver folder in the target's /tmp + file: + state: directory + path: "{{ item }}" + with_items: + - "{{ base_dest }}/sriov/{{ sriov_package }}" + - name: Create SRIOV dest folder + file: + state: directory + path: "sriov" + - name: Register SRIOV env variable + shell: "echo {{ SRIOV_ENABLED | default(False) }}" + - name: Copy SRIOV check script to target + copy: + src: "{{ playbook_dir }}/sriov_hardware_check.sh" + dest: sriov + mode: 0755 + - name: Run the script and re-evaluate the variable + command: "sriov/sriov_hardware_check.sh" + register: output + - debug: + var: output.stdout_lines 
+ - set_fact: + SRIOV_ENABLED: "{{ output.stdout }}" + - debug: + var: output + - name: Clean the script and folder + file: + path: sriov + state: absent + - name: Install SRIOV compilation packges + package: + name: "{{ item }}" + state: present + with_items: "{{ sriov_pkgs }}" + when: SRIOV_ENABLED + - name: Extract SRIOV source code + unarchive: + src: "{{ sriov_dest }}/{{ sriov_package }}.tar.gz" + dest: "{{ base_dest }}/sriov" + when: SRIOV_ENABLED + - name: Build the SRIOV target + make: + chdir: "{{ base_dest }}/sriov/{{ sriov_package }}/src" + when: SRIOV_ENABLED + - name: Create SRIOV driver folder in the target destination file: state: directory path: "{{ item }}" with_items: - sriov_driver - when: _SRIOV - - copy: - src: "{{ sriov_dest }}/{{ sriov_package }}/src/iavf.ko" + when: SRIOV_ENABLED + - name: Copy SRIOV module to target destination + copy: + src: "{{ base_dest }}/sriov/{{ sriov_package }}/src/iavf.ko" dest: sriov_driver - remote_src: no - when: _SRIOV - - copy: + remote_src: yes + when: SRIOV_ENABLED + - name: Copy SRIOV install script to target + copy: src: "{{ playbook_dir }}/install_iavf_drivers.sh" dest: sriov_driver/install.sh - remote_src: no - when: _SRIOV - - name: Changing perm of "install.sh", adding "+x" - file: dest=sriov_driver/install.sh mode=a+x - when: _SRIOV - - name: Run a script with arguments + mode: 0755 + when: SRIOV_ENABLED + - name: Run the install script with arguments shell: ./install.sh args: chdir: "sriov_driver" - when: _SRIOV + when: SRIOV_ENABLED + - name: Clean the SRIOV folder + file: + path: "{{ base_dest }}/sriov" + state: absent |
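
The new nodeAffinity terms in sriov-cni.yml and sriov-daemonset.yml select on the feature.node.kubernetes.io/network-sriov.capable label published by node-feature-discovery, so the SR-IOV pods are only scheduled onto nodes whose NICs actually expose SR-IOV. An illustrative check that the label is in place before expecting those DaemonSets to schedule (assumes the NFD DaemonSets deployed by this repo are already running; <node-name> is a placeholder):

    # List nodes that NFD has marked as SR-IOV capable
    kubectl get nodes -l feature.node.kubernetes.io/network-sriov.capable=true

    # Inspect all labels on a particular node
    kubectl get node <node-name> -o jsonpath='{.metadata.labels}'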