{# -*- encoding: utf-8 -*-
============LICENSE_START=======================================================
org.onap.vvp/engagementmgr
===================================================================
Copyright © 2017 AT&T Intellectual Property. All rights reserved.
===================================================================

Unless otherwise specified, all software contained herein is licensed
under the Apache License, Version 2.0 (the “License”);
you may not use this software except in compliance with the License.
You may obtain a copy of the License at

             http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Unless otherwise specified, all documentation contained herein is licensed
under the Creative Commons License, Attribution 4.0 Intl. (the “License”);
you may not use this documentation except in compliance with the License.
You may obtain a copy of the License at

             https://creativecommons.org/licenses/by/4.0/

Unless required by applicable law or agreed to in writing, documentation
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
============LICENSE_END============================================

ECOMP is a trademark and service mark of AT&T Intellectual Property.
#}
---
systemd:
  units:
{% for mount in mounts %}
    - name: {{mount.name}}.mount
      enable: true
      contents: |
        [Mount]
        What={{mount.dev}}
        Where={{mount.dest}}
        Type={{mount.type}}

        [Install]
        WantedBy=local-fs.target
{% endfor %}
    - name: ice-filesystems.service
      enable: true
      contents: |
        [Unit]
        After=systemd-tmpfiles-setup.service

        [Service]
        Type=oneshot
{% if manually_grow_root %}
        ExecStart=/usr/bin/cgpt resize /dev/sda9
        ExecStart=/usr/sbin/xfs_growfs /dev/sda9
{% endif %}
        ExecStart=/usr/bin/cp -r --preserve=all /usr/share/coreos /etc/coreos
        ExecStart=/usr/bin/systemctl disable ice-filesystems.service

        [Install]
        WantedBy=multi-user.target
{% raw %}
    - name: sshd.socket
      enable: true
      contents: |
        [Unit]
        Description=OpenSSH Server Socket
        Conflicts=sshd.service

        [Socket]
        ListenStream={{.ssh_ip}}:22
        FreeBind=true
        Accept=yes

        [Install]
        WantedBy=sockets.target
    - name: etcd2.service
      enable: true
      dropins:
        - name: 40-etcd-cluster.conf
          contents: |
            [Service]
            Environment="ETCD_NAME={{.etcd_name}}"
            Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS={{.etcd_initial_peers}}"
            Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379"
            Environment="ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380"
            Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
    - name: flanneld.service
      dropins:
        - name: 40-ExecStartPre-symlink.conf
          contents: |
            [Service]
            EnvironmentFile=-/etc/flannel/options.env
            ExecStartPre=/opt/init-flannel
    - name: docker.service
      dropins:
        - name: 40-flannel.conf
          contents: |
            [Unit]
            Requires=flanneld.service
            After=flanneld.service

            [Service]
            EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
    - name: locksmithd.service
      dropins:
        - name: 40-etcd-lock.conf
          contents: |
            [Service]
            Environment="REBOOT_STRATEGY=off"
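    # Illustrative note (comment only, never rendered into a unit):
    # k8s-certs@.service below is a systemd template unit; each instance
    # substitutes its suffix for %i. For example, k8s-certs@ca.pem.service
    # runs roughly:
    #   /usr/bin/bash -c "[ -f /etc/kubernetes/ssl/ca.pem ] || curl <k8s_cert_endpoint>/tls/ca.pem -o /etc/kubernetes/ssl/ca.pem"
    # where <k8s_cert_endpoint> stands for the templated certificate endpoint.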
    - name: k8s-certs@.service
      contents: |
        [Unit]
        Description=Fetch Kubernetes certificate assets
        Requires=network-online.target
        After=network-online.target

        [Service]
        ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl
        ExecStart=/usr/bin/bash -c "[ -f /etc/kubernetes/ssl/%i ] || curl {{.k8s_cert_endpoint}}/tls/%i -o /etc/kubernetes/ssl/%i"
    - name: k8s-assets.target
      contents: |
        [Unit]
        Description=Load Kubernetes Assets
        Requires=k8s-certs@apiserver.pem.service
        After=k8s-certs@apiserver.pem.service
        Requires=k8s-certs@apiserver-key.pem.service
        After=k8s-certs@apiserver-key.pem.service
        Requires=k8s-certs@ca.pem.service
        After=k8s-certs@ca.pem.service
    - name: kubelet.service
      enable: true
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube ACI
        Wants=flanneld.service
        Requires=k8s-assets.target
        After=k8s-assets.target

        [Service]
        Environment=KUBELET_VERSION={{.k8s_version}}
        Environment="RKT_OPTS=--uuid-file-save=/var/run/kubelet-pod.uuid \
          --volume dns,kind=host,source=/etc/resolv.conf \
          --mount volume=dns,target=/etc/resolv.conf \
          {{ if eq .container_runtime "rkt" -}}
          --volume rkt,kind=host,source=/opt/bin/host-rkt \
          --mount volume=rkt,target=/usr/bin/rkt \
          --volume var-lib-rkt,kind=host,source=/var/lib/rkt \
          --mount volume=var-lib-rkt,target=/var/lib/rkt \
          --volume stage,kind=host,source=/tmp \
          --mount volume=stage,target=/tmp \
          {{ end -}}
          --volume modprobe,kind=host,source=/usr/sbin/modprobe \
          --mount volume=modprobe,target=/usr/sbin/modprobe \
          --volume lib-modules,kind=host,source=/lib/modules \
          --mount volume=lib-modules,target=/lib/modules \
          --volume mkfsxfs,kind=host,source=/usr/sbin/mkfs.xfs \
          --mount volume=mkfsxfs,target=/usr/sbin/mkfs.xfs \
          --volume libxfs,kind=host,source=/lib64/libxfs.so.0 \
          --mount volume=libxfs,target=/lib64/libxfs.so.0 \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log"
        ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/usr/bin/mkdir -p /var/log/containers
        ExecStartPre=/usr/bin/systemctl is-active flanneld.service
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
        ExecStart=/usr/lib/coreos/kubelet-wrapper \
          --api-servers=http://127.0.0.1:8080 \
          --register-schedulable=true \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --network-plugin=cni \
          --container-runtime={{.container_runtime}} \
          --rkt-path=/usr/bin/rkt \
          --rkt-stage1-image=coreos.com/rkt/stage1-coreos \
          --allow-privileged=true \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --hostname-override={{.domain_name}} \
          --cluster_dns={{.k8s_dns_service_ip}} \
          --cluster_domain=cluster.local
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
        Restart=always
        RestartSec=10

        [Install]
        WantedBy=multi-user.target
    - name: k8s-addons.service
      enable: true
      contents: |
        [Unit]
        Description=Kubernetes Addons

        [Service]
        Type=oneshot
        ExecStart=/opt/k8s-addons

        [Install]
        WantedBy=multi-user.target
{{ if eq .container_runtime "rkt" }}
    - name: rkt-api.service
      enable: true
      contents: |
        [Unit]
        Before=kubelet.service

        [Service]
        ExecStart=/usr/bin/rkt api-service
        Restart=always
        RestartSec=10

        [Install]
        RequiredBy=kubelet.service
    - name: load-rkt-stage1.service
      enable: true
      contents: |
        [Unit]
        Description=Load rkt stage1 images
        Documentation=http://github.com/coreos/rkt
        Requires=network-online.target
        After=network-online.target
        Before=rkt-api.service

        [Service]
        Type=oneshot
        RemainAfterExit=yes
        ExecStart=/usr/bin/rkt fetch /usr/lib/rkt/stage1-images/stage1-coreos.aci /usr/lib/rkt/stage1-images/stage1-fly.aci --insecure-options=image

        [Install]
        RequiredBy=rkt-api.service
{{ end }}
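    # Illustrative note: this file is templated twice. Jinja renders it first,
    # then a second pass resolves the dot-prefixed template expressions that
    # the surrounding raw block shields from Jinja. The sysdig unit below
    # briefly closes the raw block so Jinja can substitute sysdig_access_key
    # into the docker run command, then reopens it for the second pass.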
{{if ne .sysdig_access_key "" }}
    - name: sysdig.service
      enable: true
      contents: |
        [Unit]
        Description=Sysdig Cloud Agent
        After=docker.service
        Requires=docker.service

        [Service]
        TimeoutStartSec=0
        ExecStartPre=-/usr/bin/docker kill sysdig-agent
        ExecStartPre=-/usr/bin/docker rm sysdig-agent
        ExecStartPre=-/usr/bin/docker rmi sysdig-agent
        ExecStartPre=/usr/bin/docker pull sysdig/agent
{% endraw %}
        ExecStart=/usr/bin/docker run --name sysdig-agent --privileged --net host --pid host -e ADDITIONAL_CONF="app_checks:\n - name: nginx\n enabled: false" -e ACCESS_KEY={{sysdig_access_key}} -e TAGS=deploy_environment:staging -v /var/lib/rkt:/host/var/lib/rkt:ro -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro sysdig/agent
{% raw %}
        ExecStop=/usr/bin/docker stop sysdig-agent

        [Install]
        WantedBy=multi-user.target
        RequiredBy=k8s-addons.service
{{ end }}
storage:
  filesystems:
{% endraw %}
{% for fs in filesystems %}
    - name: {{fs.device}}
      mount:
        device: {{fs.device}}
        format: {{fs.format}}
        create:
          force: {{fs.create.force}}
{% if "options" in fs.create.keys() %}
          options:
{% for option in fs.create.options %}
            - {{option}}
{% endfor %}
{% endif %}
{% endfor %}
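{#
Illustrative example (stripped at Jinja render time): given a hypothetical
filesystems entry such as

  filesystems:
    - device: /dev/xvdb
      format: xfs
      create:
        force: true
        options:
          - "-L"
          - "data"

the loop above renders a storage.filesystems item like:

    - name: /dev/xvdb
      mount:
        device: /dev/xvdb
        format: xfs
        create:
          force: true
          options:
            - -L
            - data
#}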
{% raw %}
  files:
    - path: /etc/kubernetes/cni/net.d/10-flannel.conf
      filesystem: root
      contents:
        inline: |
          {
            "name": "podnet",
            "type": "flannel",
            "delegate": {
              "isDefaultGateway": true
            }
          }
    - path: /etc/kubernetes/cni/docker_opts_cni.env
      filesystem: root
      contents:
        inline: |
          DOCKER_OPT_BIP=""
          DOCKER_OPT_IPMASQ=""
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
    - path: /etc/kubernetes/manifests/kube-proxy.yaml
      filesystem: root
      contents:
        inline: |
          apiVersion: v1
          kind: Pod
          metadata:
            name: kube-proxy
            namespace: kube-system
            annotations:
              rkt.alpha.kubernetes.io/stage1-name-override: coreos.com/rkt/stage1-fly
          spec:
            hostNetwork: true
            containers:
              - name: kube-proxy
                image: quay.io/coreos/hyperkube:{{.k8s_version}}
                command:
                  - /hyperkube
                  - proxy
                  - --master=http://127.0.0.1:8080
                  - --cluster-cidr={{.k8s_service_ip_range}}
                securityContext:
                  privileged: true
                volumeMounts:
                  - mountPath: /etc/ssl/certs
                    name: ssl-certs-host
                    readOnly: true
                  - mountPath: /var/run/dbus
                    name: dbus
                    readOnly: false
            volumes:
              - hostPath:
                  path: /usr/share/ca-certificates
                name: ssl-certs-host
              - hostPath:
                  path: /var/run/dbus
                name: dbus
    - path: /etc/kubernetes/manifests/kube-apiserver.yaml
      filesystem: root
      contents:
        inline: |
          apiVersion: v1
          kind: Pod
          metadata:
            name: kube-apiserver
            namespace: kube-system
          spec:
            hostNetwork: true
            containers:
              - name: kube-apiserver
                image: quay.io/coreos/hyperkube:{{.k8s_version}}
                command:
                  - /hyperkube
                  - apiserver
                  - --bind-address=0.0.0.0
                  - --advertise-address={{.k8s_apiserver_advertise_address}}
                  - --etcd-servers={{.k8s_etcd_endpoints}}
                  - --allow-privileged=true
                  - --service-cluster-ip-range={{.k8s_service_ip_range}}
                  - --secure-port={{.k8s_controller_port}}
                  - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
                  - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem
                  - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
                  - --client-ca-file=/etc/kubernetes/ssl/ca.pem
                  - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem
                  - --runtime-config=extensions/v1beta1/networkpolicies=true
                  - --anonymous-auth=false
                livenessProbe:
                  httpGet:
                    host: 127.0.0.1
                    port: 8080
                    path: /healthz
                  initialDelaySeconds: 15
                  timeoutSeconds: 15
                ports:
                  - containerPort: {{.k8s_controller_port}}
                    hostPort: {{.k8s_controller_port}}
                    name: https
                  - containerPort: 8080
                    hostPort: 8080
                    name: local
                volumeMounts:
                  - mountPath: /etc/kubernetes/ssl
                    name: ssl-certs-kubernetes
                    readOnly: true
                  - mountPath: /etc/ssl/certs
                    name: ssl-certs-host
                    readOnly: true
            volumes:
              - hostPath:
                  path: /etc/kubernetes/ssl
                name: ssl-certs-kubernetes
              - hostPath:
                  path: /usr/share/ca-certificates
                name: ssl-certs-host
    - path: /etc/flannel/options.env
      filesystem: root
      contents:
        inline: |
          FLANNELD_ETCD_ENDPOINTS={{.k8s_etcd_endpoints}}
          FLANNELD_IFACE={{.k8s_flanneld_iface}}
    - path: /etc/kubernetes/manifests/kube-controller-manager.yaml
      filesystem: root
      contents:
        inline: |
          apiVersion: v1
          kind: Pod
          metadata:
            name: kube-controller-manager
            namespace: kube-system
          spec:
            containers:
              - name: kube-controller-manager
                image: quay.io/coreos/hyperkube:{{.k8s_version}}
                command:
                  - /hyperkube
                  - controller-manager
                  - --master=http://127.0.0.1:8080
                  - --leader-elect=true
                  - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
                  - --root-ca-file=/etc/kubernetes/ssl/ca.pem
                resources:
                  requests:
                    cpu: 200m
                livenessProbe:
                  httpGet:
                    host: 127.0.0.1
                    path: /healthz
                    port: 10252
                  initialDelaySeconds: 15
                  timeoutSeconds: 15
                volumeMounts:
                  - mountPath: /etc/kubernetes/ssl
                    name: ssl-certs-kubernetes
                    readOnly: true
                  - mountPath: /etc/ssl/certs
                    name: ssl-certs-host
                    readOnly: true
            hostNetwork: true
            volumes:
              - hostPath:
                  path: /etc/kubernetes/ssl
                name: ssl-certs-kubernetes
              - hostPath:
                  path: /usr/share/ca-certificates
                name: ssl-certs-host
    - path: /etc/kubernetes/manifests/kube-scheduler.yaml
      filesystem: root
      contents:
        inline: |
          apiVersion: v1
          kind: Pod
          metadata:
            name: kube-scheduler
            namespace: kube-system
          spec:
            hostNetwork: true
            containers:
              - name: kube-scheduler
                image: quay.io/coreos/hyperkube:{{.k8s_version}}
                command:
                  - /hyperkube
                  - scheduler
                  - --master=http://127.0.0.1:8080
                  - --leader-elect=true
                resources:
                  requests:
                    cpu: 100m
                livenessProbe:
                  httpGet:
                    host: 127.0.0.1
                    path: /healthz
                    port: 10251
                  initialDelaySeconds: 15
                  timeoutSeconds: 15
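    # Illustrative note: the manifests above under /etc/kubernetes/manifests
    # are static pods started directly by the kubelet (see --pod-manifest-path
    # in kubelet.service), while the addon manifests written below under
    # /srv/kubernetes/manifests are only posted to the API server later by the
    # /opt/k8s-addons script.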
    - path: /srv/kubernetes/manifests/kube-dns-deployment.yaml
      filesystem: root
      contents:
        inline: |
          apiVersion: extensions/v1beta1
          kind: Deployment
          metadata:
            name: kube-dns
            namespace: kube-system
            labels:
              k8s-app: kube-dns
              kubernetes.io/cluster-service: "true"
          spec:
            strategy:
              rollingUpdate:
                maxSurge: 10%
                maxUnavailable: 0
            selector:
              matchLabels:
                k8s-app: kube-dns
            template:
              metadata:
                labels:
                  k8s-app: kube-dns
                annotations:
                  scheduler.alpha.kubernetes.io/critical-pod: ''
                  scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
              spec:
                containers:
                  - name: kubedns
                    image: gcr.io/google_containers/kubedns-amd64:1.9
                    livenessProbe:
                      httpGet:
                        path: /healthz-kubedns
                        port: 8080
                        scheme: HTTP
                      initialDelaySeconds: 60
                      timeoutSeconds: 5
                      successThreshold: 1
                      failureThreshold: 5
                    readinessProbe:
                      httpGet:
                        path: /readiness
                        port: 8081
                        scheme: HTTP
                      initialDelaySeconds: 3
                      timeoutSeconds: 5
                    args:
                      - --domain=cluster.local
                      - --dns-port=10053
                      - --config-map=kube-dns
                      - --v=2
                    env:
                      - name: PROMETHEUS_PORT
                        value: "10055"
                    ports:
                      - containerPort: 10053
                        name: dns-local
                        protocol: UDP
                      - containerPort: 10053
                        name: dns-tcp-local
                        protocol: TCP
                      - containerPort: 10055
                        name: metrics
                        protocol: TCP
                  - name: dnsmasq
                    image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
                    livenessProbe:
                      httpGet:
                        path: /healthz-dnsmasq
                        port: 8080
                        scheme: HTTP
                      initialDelaySeconds: 60
                      timeoutSeconds: 5
                      successThreshold: 1
                      failureThreshold: 5
                    args:
                      - --cache-size=1000
                      - --no-resolv
                      - --server=127.0.0.1#10053
                      - --log-facility=-
                    ports:
                      - containerPort: 53
                        name: dns
                        protocol: UDP
                      - containerPort: 53
                        name: dns-tcp
                        protocol: TCP
                  - name: dnsmasq-metrics
                    image: gcr.io/google_containers/dnsmasq-metrics-amd64:1.0
                    livenessProbe:
                      httpGet:
                        path: /metrics
                        port: 10054
                        scheme: HTTP
                      initialDelaySeconds: 60
                      timeoutSeconds: 5
                      successThreshold: 1
                      failureThreshold: 5
                    args:
                      - --v=2
                      - --logtostderr
                    ports:
                      - containerPort: 10054
                        name: metrics
                        protocol: TCP
                  - name: healthz
                    image: gcr.io/google_containers/exechealthz-amd64:1.2
                    args:
                      - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
                      - --url=/healthz-dnsmasq
                      - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
                      - --url=/healthz-kubedns
                      - --port=8080
                      - --quiet
                    ports:
                      - containerPort: 8080
                        protocol: TCP
                dnsPolicy: Default
    - path: /srv/kubernetes/manifests/kube-dns-svc.yaml
      filesystem: root
      contents:
        inline: |
          apiVersion: v1
          kind: Service
          metadata:
            name: kube-dns
            namespace: kube-system
            labels:
              k8s-app: kube-dns
              kubernetes.io/cluster-service: "true"
              kubernetes.io/name: "KubeDNS"
          spec:
            selector:
              k8s-app: kube-dns
            clusterIP: {{.k8s_dns_service_ip}}
            ports:
              - name: dns
                port: 53
                protocol: UDP
              - name: dns-tcp
                port: 53
                protocol: TCP
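    # Illustrative note: the clusterIP above and the kubelet's --cluster_dns
    # flag are rendered from the same k8s_dns_service_ip value, so every pod's
    # resolv.conf ends up pointing at this kube-dns Service.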
    - path: /srv/kubernetes/manifests/heapster-deployment.yaml
      filesystem: root
      contents:
        inline: |
          apiVersion: extensions/v1beta1
          kind: Deployment
          metadata:
            name: heapster-v1.2.0
            namespace: kube-system
            labels:
              k8s-app: heapster
              kubernetes.io/cluster-service: "true"
              version: v1.2.0
          spec:
            replicas: 1
            selector:
              matchLabels:
                k8s-app: heapster
                version: v1.2.0
            template:
              metadata:
                labels:
                  k8s-app: heapster
                  version: v1.2.0
                annotations:
                  scheduler.alpha.kubernetes.io/critical-pod: ''
                  scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
              spec:
                containers:
                  - image: gcr.io/google_containers/heapster:v1.2.0
                    name: heapster
                    livenessProbe:
                      httpGet:
                        path: /healthz
                        port: 8082
                        scheme: HTTP
                      initialDelaySeconds: 180
                      timeoutSeconds: 5
                    command:
                      - /heapster
                      - --source=kubernetes.summary_api:''
                  - image: gcr.io/google_containers/addon-resizer:1.6
                    name: heapster-nanny
                    resources:
                      limits:
                        cpu: 50m
                        memory: 90Mi
                      requests:
                        cpu: 50m
                        memory: 90Mi
                    env:
                      - name: MY_POD_NAME
                        valueFrom:
                          fieldRef:
                            fieldPath: metadata.name
                      - name: MY_POD_NAMESPACE
                        valueFrom:
                          fieldRef:
                            fieldPath: metadata.namespace
                    command:
                      - /pod_nanny
                      - --cpu=80m
                      - --extra-cpu=4m
                      - --memory=200Mi
                      - --extra-memory=4Mi
                      - --threshold=5
                      - --deployment=heapster-v1.2.0
                      - --container=heapster
                      - --poll-period=300000
                      - --estimator=exponential
    - path: /srv/kubernetes/manifests/heapster-svc.yaml
      filesystem: root
      contents:
        inline: |
          kind: Service
          apiVersion: v1
          metadata:
            name: heapster
            namespace: kube-system
            labels:
              kubernetes.io/cluster-service: "true"
              kubernetes.io/name: "Heapster"
          spec:
            ports:
              - port: 80
                targetPort: 8082
            selector:
              k8s-app: heapster
    - path: /srv/kubernetes/manifests/kube-dashboard-deployment.yaml
      filesystem: root
      contents:
        inline: |
          apiVersion: extensions/v1beta1
          kind: Deployment
          metadata:
            name: kubernetes-dashboard
            namespace: kube-system
            labels:
              k8s-app: kubernetes-dashboard
              kubernetes.io/cluster-service: "true"
          spec:
            selector:
              matchLabels:
                k8s-app: kubernetes-dashboard
            template:
              metadata:
                labels:
                  k8s-app: kubernetes-dashboard
                annotations:
                  scheduler.alpha.kubernetes.io/critical-pod: ''
                  scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
              spec:
                containers:
                  - name: kubernetes-dashboard
                    image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.0
                    resources:
                      # keep request = limit to keep this container in guaranteed class
                      limits:
                        cpu: 100m
                        memory: 50Mi
                      requests:
                        cpu: 100m
                        memory: 50Mi
                    ports:
                      - containerPort: 9090
                    livenessProbe:
                      httpGet:
                        path: /
                        port: 9090
                      initialDelaySeconds: 30
                      timeoutSeconds: 30
    - path: /srv/kubernetes/manifests/kube-dashboard-svc.yaml
      filesystem: root
      contents:
        inline: |
          apiVersion: v1
          kind: Service
          metadata:
            name: kubernetes-dashboard
            namespace: kube-system
            labels:
              k8s-app: kubernetes-dashboard
              kubernetes.io/cluster-service: "true"
          spec:
            selector:
              k8s-app: kubernetes-dashboard
            ports:
              - port: 80
                targetPort: 9090
    - path: /opt/init-flannel
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -ex
          function init_flannel {
            echo "Waiting for etcd..."
            while true
            do
              IFS=',' read -ra ES <<< "{{.k8s_etcd_endpoints}}"
              for ETCD in "${ES[@]}"; do
                echo "Trying: $ETCD"
                if [ -n "$(curl --silent "$ETCD/v2/machines")" ]; then
                  local ACTIVE_ETCD=$ETCD
                  break
                fi
                sleep 1
              done
              if [ -n "$ACTIVE_ETCD" ]; then
                break
              fi
            done
            RES=$(curl --silent -X PUT -d "value={\"Network\":\"{{.k8s_pod_network}}\",\"Backend\":{\"Type\":\"vxlan\"}}" "$ACTIVE_ETCD/v2/keys/coreos.com/network/config?prevExist=false")
            if [ -z "$(echo $RES | grep '"action":"create"')" ] && [ -z "$(echo $RES | grep 'Key already exists')" ]; then
              echo "Unexpected error configuring flannel pod network: $RES"
            fi
          }
          init_flannel
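    # Illustrative note: the create-only PUT in init-flannel is roughly
    # equivalent to running, for a hypothetical pod network of 10.2.0.0/16:
    #   etcdctl mk /coreos.com/network/config \
    #     '{"Network":"10.2.0.0/16","Backend":{"Type":"vxlan"}}'
    # flanneld reads this key at startup to carve out per-node subnets.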
{{ if eq .container_runtime "rkt" }}
    - path: /opt/bin/host-rkt
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/sh
          # This is bind mounted into the kubelet rootfs and all rkt shell-outs go
          # through this rkt wrapper. It essentially enters the host mount namespace
          # (which it is already in) only for the purpose of breaking out of the chroot
          # before calling rkt. It makes things like rkt gc work and avoids bind mounting
          # certain rkt filesystem dependencies into the kubelet rootfs. This can
          # eventually be obviated when the write-api stuff gets upstream and rkt gc is
          # done through the api-server. Related issue:
          # https://github.com/coreos/rkt/issues/2878
          exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "$@"
{{ end }}
    - path: /opt/k8s-addons
      filesystem: root
      mode: 0544
      contents:
        inline: |
          #!/bin/bash -ex
          echo "Waiting for Kubernetes API..."
          until curl --silent "http://127.0.0.1:8080/version"
          do
            sleep 5
          done
          echo "K8S: DNS addon"
          curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-deployment.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments"
          curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services"
          echo "K8S: Heapster addon"
          curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/heapster-deployment.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments"
          curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/heapster-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services"
          echo "K8S: Dashboard addon"
          curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-deployment.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments"
          curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services"
    - path: "/etc/modules-load.d/rbd.conf"
      filesystem: root
      contents:
        inline: |
          rbd
    - path: "/opt/bin/ceph-rbdnamer"
      filesystem: root
      mode: 0755
      contents:
        inline: |
          #!/bin/sh
          DEV=$1
          NUM=`echo $DEV | sed 's#p.*##g' | tr -d 'a-z'`
          POOL=`cat /sys/devices/rbd/$NUM/pool`
          IMAGE=`cat /sys/devices/rbd/$NUM/name`
          SNAP=`cat /sys/devices/rbd/$NUM/current_snap`
          if [ "$SNAP" = "-" ]; then
            echo -n "$POOL $IMAGE"
          else
            echo -n "$POOL $IMAGE@$SNAP"
          fi
    - path: "/etc/udev/rules.d/50-rbd.rules"
      filesystem: root
      contents:
        inline: |
          KERNEL=="rbd[0-9]*", ENV{DEVTYPE}=="disk", PROGRAM="/opt/bin/ceph-rbdnamer %k", SYMLINK+="rbd/%c{1}/%c{2}"
          KERNEL=="rbd[0-9]*", ENV{DEVTYPE}=="partition", PROGRAM="/opt/bin/ceph-rbdnamer %k", SYMLINK+="rbd/%c{1}/%c{2}-part%n"
    - path: /etc/ssh/sshd_config
      filesystem: root
      mode: 0600
      user:
        id: 0
      group:
        id: 0
      contents:
        inline: |
          UsePrivilegeSeparation sandbox
          Subsystem sftp internal-sftp
          ClientAliveInterval 180
          UseDNS no
          ListenAddress {{.ssh_ip}}
{{ if index . "ssh_authorized_keys" }}
passwd:
  users:
    - name: core
      ssh_authorized_keys:
{{ range $element := .ssh_authorized_keys }}
        - {{$element}}
{{end}}
{{end}}
{% endraw %}
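{#
Illustrative note (stripped at Jinja render time): this template renders in
two passes. A minimal sketch of the first (Jinja) pass, assuming a
hypothetical filename and context:

    from jinja2 import Template

    # Render pass 1: resolves the {% ... %} loops/conditionals and the
    # sysdig_access_key substitution; everything inside raw blocks passes
    # through untouched.
    rendered = Template(open("master.yaml").read()).render(
        mounts=[], filesystems=[], manually_grow_root=False,
        sysdig_access_key="")

The output still contains the dot-prefixed expressions (e.g. .ssh_ip,
.k8s_version), which the downstream provisioner resolves in the second pass.
#}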