author     Todd Malsbary <todd.malsbary@intel.com>    2020-11-20 15:42:54 -0800
committer  Todd Malsbary <todd.malsbary@intel.com>    2020-12-09 15:08:21 -0800
commit     5f99856b3cdc3c11e82f0f67b3da973d43e47fc7 (patch)
tree       eb70b409c7b70a3a248016790c361f27ff076db8 /kud
parent     7e06fbaa3d1293ca9b25aeb7ea7cb7be2179e30a (diff)
Enable pod security policies
The intention with this change is to disable CAP_NET_RAW (which can be a security vulnerability) for created Pods. kubespray provides the podsecuritypolicy_enabled variable for enabling privileged (for kube-system) and restricted (for everyone else) policies.

Enabling this requires binding the KUD_ADDONs to the privileged policy and specifying the security context correctly for Pods running in the default namespace. As of this change, the only difference between the privileged and restricted security policies is the dropping of CAP_NET_RAW in the restricted policy. To use the default restricted policy provided with kubespray, additional changes must be made to the Pods that are run in the default namespace (such as running as a non-root user, not requesting privileged mode, etc.).

Issue-ID: MULTICLOUD-1256
Signed-off-by: Todd Malsbary <todd.malsbary@intel.com>
Change-Id: I7d6add122ad4046f9116ef03a249f5c9da1d7eec
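For illustration only (not part of this patch): under the default restricted policy shipped with kubespray, a Pod run in the default namespace would need a security context roughly like the sketch below. The Pod name and image are placeholders; only the securityContext fields reflect the requirements described above (non-root user, no privileged mode, CAP_NET_RAW dropped).

apiVersion: v1
kind: Pod
metadata:
  name: restricted-example   # hypothetical name, for illustration only
  namespace: default
spec:
  containers:
  - name: app
    image: busybox:1.32      # placeholder image
    command: ["sleep", "3600"]
    securityContext:
      privileged: false                  # do not request privileged mode
      allowPrivilegeEscalation: false
      runAsNonRoot: true                 # run as a non-root user
      runAsUser: 1000
      capabilities:
        drop:
        - NET_RAW                        # the capability this change targets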
Diffstat (limited to 'kud')
-rw-r--r--  kud/deployment_infra/images/nfd-master.yaml                               | 17
-rw-r--r--  kud/deployment_infra/playbooks/configure-emco.yml                         | 10
-rw-r--r--  kud/deployment_infra/playbooks/configure-onap4k8s.yml                     | 10
-rw-r--r--  kud/deployment_infra/playbooks/configure-ovn4nfv.yml                      |  4
-rw-r--r--  kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml  | 36
-rw-r--r--  kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml        | 34
6 files changed, 107 insertions(+), 4 deletions(-)
diff --git a/kud/deployment_infra/images/nfd-master.yaml b/kud/deployment_infra/images/nfd-master.yaml
index 846bb753..4e07c2ed 100644
--- a/kud/deployment_infra/images/nfd-master.yaml
+++ b/kud/deployment_infra/images/nfd-master.yaml
@@ -37,6 +37,23 @@ subjects:
name: nfd-master
namespace: node-feature-discovery
---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: psp:default:privileged
+ namespace: node-feature-discovery
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:privileged
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: node-feature-discovery
+- kind: ServiceAccount
+ name: nfd-master
+ namespace: node-feature-discovery
+---
apiVersion: apps/v1
kind: DaemonSet
metadata:
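The RoleBinding added above is the declarative counterpart of the kubectl-based bindings that the playbook changes below create. As a sketch only (not part of this patch), the same binding for node-feature-discovery could be expressed in the playbooks' style as:

    # Sketch only: imperative equivalent of the RoleBinding added to nfd-master.yaml.
    - name: Create pod security policy role bindings
      shell: "/usr/local/bin/kubectl -n node-feature-discovery create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=node-feature-discovery:default --serviceaccount=node-feature-discovery:nfd-master"
      ignore_errors: True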
diff --git a/kud/deployment_infra/playbooks/configure-emco.yml b/kud/deployment_infra/playbooks/configure-emco.yml
index 7a4cf926..96b4a23d 100644
--- a/kud/deployment_infra/playbooks/configure-emco.yml
+++ b/kud/deployment_infra/playbooks/configure-emco.yml
@@ -36,12 +36,20 @@
- debug:
var: make_all.stdout_lines
+ - name: Create emco namespace
+ shell: "/usr/local/bin/kubectl create namespace emco"
+ ignore_errors: True
+
+ - name: Create pod security policy role bindings
+ shell: "/usr/local/bin/kubectl -n emco create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=emco:default --serviceaccount=emco:emco-fluentd"
+ ignore_errors: True
+
- name: Get cluster name
shell: "kubectl -n kube-system get configmap/kubeadm-config -o yaml | grep clusterName: | awk '{print $2}'"
register: cluster_name
- name: Change the emco directory and run the command helm install
- command: /usr/local/bin/helm install --namespace emco --create-namespace --set emco-tools.fluentd.clusterDomain={{ cluster_name.stdout }} emco dist/packages/emco-0.1.0.tgz
+ command: /usr/local/bin/helm install --namespace emco --set emco-tools.fluentd.clusterDomain={{ cluster_name.stdout }} emco dist/packages/emco-0.1.0.tgz
register: helm_install
args:
chdir: /opt/multicloud/deployments/helm/v2/emco
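A quick way to confirm that such a binding took effect is an RBAC check like the sketch below (not part of this patch; it assumes kubespray names its privileged PodSecurityPolicy "privileged", which is not shown in this diff):

    # Sketch only: verify the emco default service account may use the privileged policy.
    - name: Check that the emco default service account can use the privileged policy
      shell: "/usr/local/bin/kubectl -n emco auth can-i use podsecuritypolicy/privileged --as=system:serviceaccount:emco:default"
      register: psp_check
      ignore_errors: True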
diff --git a/kud/deployment_infra/playbooks/configure-onap4k8s.yml b/kud/deployment_infra/playbooks/configure-onap4k8s.yml
index c016cf1c..48052225 100644
--- a/kud/deployment_infra/playbooks/configure-onap4k8s.yml
+++ b/kud/deployment_infra/playbooks/configure-onap4k8s.yml
@@ -36,8 +36,16 @@
- debug:
var: make_all.stdout_lines
+ - name: Create onap4k8s-ns namespace
+ shell: "/usr/local/bin/kubectl create namespace onap4k8s-ns"
+ ignore_errors: True
+
+ - name: Create pod security policy role bindings
+ shell: "/usr/local/bin/kubectl -n onap4k8s-ns create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=onap4k8s-ns:default"
+ ignore_errors: True
+
- name: Change the onap4k8s directory and run the command helm install
- command: /usr/local/bin/helm install --namespace onap4k8s-ns --create-namespace --set service.type=NodePort multicloud-onap8ks dist/packages/multicloud-k8s-5.0.0.tgz
+ command: /usr/local/bin/helm install --namespace onap4k8s-ns --set service.type=NodePort multicloud-onap8ks dist/packages/multicloud-k8s-5.0.0.tgz
register: helm_install
args:
chdir: /opt/multicloud/deployments/helm/onap4k8s
diff --git a/kud/deployment_infra/playbooks/configure-ovn4nfv.yml b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
index b335f8c8..7043bf53 100644
--- a/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
+++ b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
@@ -40,6 +40,10 @@
shell: "/usr/local/bin/kubectl create namespace operator"
ignore_errors: True
+ - name: create pod security policy role bindings
+ shell: "/usr/local/bin/kubectl -n operator create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=operator:default --serviceaccount=operator:k8s-nfn-sa"
+ ignore_errors: True
+
- name: apply nfn operator label
command: "/usr/local/bin/kubectl label node {{ item }} nfnType=operator --overwrite"
with_inventory_hostnames: ovn-central
diff --git a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
index 0a2953ce..18a55035 100644
--- a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
@@ -52,8 +52,6 @@ local_volume_provisioner_enabled: true
# Helm deployment
helm_enabled: true
-docker_version: 'latest'
-
# Kube-proxy proxyMode configuration.
# NOTE: Ipvs is based on netfilter hook function, but uses hash table as the underlying data structure and
# works in the kernel space
@@ -81,3 +79,37 @@ kube_pods_subnet: 10.244.64.0/18
# disable localdns cache
enable_nodelocaldns: false
+
+# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
+podsecuritypolicy_enabled: true
+# The restricted spec is identical to the kubespray podsecuritypolicy_privileged_spec, with the replacement of
+# allowedCapabilities:
+# - '*'
+# by
+# requiredDropCapabilities:
+# - NET_RAW
+podsecuritypolicy_restricted_spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ volumes:
+ - '*'
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+   max: 65535
+ hostIPC: true
+ hostPID: true
+ requiredDropCapabilities:
+ - NET_RAW
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'RunAsAny'
+ fsGroup:
+ rule: 'RunAsAny'
+ readOnlyRootFilesystem: false
+ # This will fail if allowed-unsafe-sysctls is not set accordingly in kubelet flags
+ allowedUnsafeSysctls:
+ - '*'
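As the inline comment notes, the allowedUnsafeSysctls wildcard above only takes effect if the kubelet is started with a matching --allowed-unsafe-sysctls flag. One way to pass it through kubespray group_vars is sketched below; the kubelet_custom_flags variable name is an assumption and is not part of this change:

kubelet_custom_flags:
  - "--allowed-unsafe-sysctls=net.*"   # assumed kubespray variable; pattern must cover the sysctls Pods request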
diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
index ba79b4b9..5b06b788 100644
--- a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
@@ -76,3 +76,37 @@ download_localhost: True
kube_service_addresses: 10.244.0.0/18
# Subnet for Pod IPs
kube_pods_subnet: 10.244.64.0/18
+
+# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
+podsecuritypolicy_enabled: true
+# The restricted spec is identical to the kubespray podsecuritypolicy_privileged_spec, with the replacement of
+# allowedCapabilities:
+# - '*'
+# by
+# requiredDropCapabilities:
+# - NET_RAW
+podsecuritypolicy_restricted_spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ volumes:
+ - '*'
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+   max: 65535
+ hostIPC: true
+ hostPID: true
+ requiredDropCapabilities:
+ - NET_RAW
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'RunAsAny'
+ fsGroup:
+ rule: 'RunAsAny'
+ readOnlyRootFilesystem: false
+ # This will fail if allowed-unsafe-sysctls is not set accordingly in kubelet flags
+ allowedUnsafeSysctls:
+ - '*'