Diffstat (limited to 'vnfs/DAaaS/deploy')
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/.helmignore28
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/Chart.yaml8
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/0-namespace.yaml10
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/10-ingress-deployment.yaml40
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/100-gloo-crds.yaml111
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/101-knative-crds-0.5.1.yaml343
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/11-ingress-proxy-deployment.yaml65
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/12-ingress-proxy-configmap.yaml52
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/13-ingress-proxy-service.yaml23
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml58
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml49
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/16-clusteringress-proxy-service.yaml21
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml982
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/18-settings.yaml30
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml29
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml29
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml29
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml22
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml22
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml21
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/3-gloo-deployment.yaml57
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/4-gloo-service.yaml18
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/5-discovery-deployment.yaml46
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/6-gateway-deployment.yaml47
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/7-gateway-proxy-deployment.yaml67
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/8-gateway-proxy-service.yaml35
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/9-gateway-proxy-configmap.yaml54
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/values-ingress.yaml74
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/values-knative.yaml72
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/values.yaml56
-rw-r--r--vnfs/DAaaS/deploy/00-init/istio/README.md31
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/Chart.yaml7
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/NOTES.txt5
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/_helpers.tpl16
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/cluster.yml180
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrole.yaml165
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrolebinding.yaml38
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/dashboard-external-http.yaml22
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/deployment.yaml108
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/psp.yaml35
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/resources.yaml177
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/role.yaml35
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/rolebinding.yaml19
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/serviceaccount.yaml8
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/storageclass.yml28
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/tool-box.yml62
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/values.yaml75
-rw-r--r--vnfs/DAaaS/deploy/collection/.helmignore22
-rw-r--r--vnfs/DAaaS/deploy/collection/Chart.yaml5
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/cadvisor/.helmignore22
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/cadvisor/Chart.yaml19
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/NOTES.txt34
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/_helpers.tpl25
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/daemonset.yaml79
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/service.yaml37
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/cadvisor/values.yaml23
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/collectd/.helmignore21
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/collectd/Chart.yaml19
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/collectd/resources/config/collectd.conf44
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/collectd/templates/NOTES.txt34
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/collectd/templates/_helpers.tpl25
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/collectd/templates/configmap.yaml27
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/collectd/templates/daemonset.yaml83
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/collectd/templates/service.yaml32
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/collectd/values.yaml28
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/.helmignore21
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/Chart.yaml15
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/OWNERS4
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/README.md80
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/NOTES.txt15
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/_helpers.tpl55
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/daemonset.yaml98
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/endpoints.yaml17
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/monitor.yaml17
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml15
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml17
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp.yaml51
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/service.yaml22
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/serviceaccount.yaml15
-rwxr-xr-xvnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/values.yaml96
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/prometheus/.helmignore22
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/prometheus/Chart.yaml5
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/prometheus/templates/NOTES.txt15
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/prometheus/templates/_helpers.tpl47
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/prometheus/templates/prometheus.yaml47
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/prometheus/templates/service.yaml38
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/prometheus/templates/servicemonitor.yaml30
-rw-r--r--vnfs/DAaaS/deploy/collection/charts/prometheus/values.yaml79
-rw-r--r--vnfs/DAaaS/deploy/collection/values.yaml28
-rw-r--r--vnfs/DAaaS/deploy/day2_configs/collectd/README.txt14
-rw-r--r--vnfs/DAaaS/deploy/day2_configs/collectd/add_plugins.yaml47
-rw-r--r--vnfs/DAaaS/deploy/day2_configs/collectd/replace_image.yaml6
-rw-r--r--vnfs/DAaaS/deploy/day2_configs/prometheus/README.txt7
-rw-r--r--vnfs/DAaaS/deploy/day2_configs/prometheus/add_remote_write.yaml13
-rw-r--r--vnfs/DAaaS/deploy/inference-core/.helmignore22
-rw-r--r--vnfs/DAaaS/deploy/inference-core/Chart.yaml5
-rw-r--r--vnfs/DAaaS/deploy/inference-core/charts/tf-serving/.helmignore22
-rw-r--r--vnfs/DAaaS/deploy/inference-core/charts/tf-serving/Chart.yaml5
-rw-r--r--vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/NOTES.txt20
-rw-r--r--vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/_helpers.tpl41
-rw-r--r--vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/deployment.yaml138
-rw-r--r--vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/ingress.yaml55
-rw-r--r--vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/secrets.yaml31
-rw-r--r--vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/service.yaml39
-rw-r--r--vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/serviceaccount.yaml25
-rw-r--r--vnfs/DAaaS/deploy/inference-core/charts/tf-serving/values.yaml71
-rw-r--r--vnfs/DAaaS/deploy/inference-core/values.yaml30
-rw-r--r--vnfs/DAaaS/deploy/messaging/.helmignore22
-rw-r--r--vnfs/DAaaS/deploy/messaging/Chart.yaml5
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/kafka/.helmignore22
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/kafka/Chart.yaml5
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/kafka/templates/kafka-cluster.yaml29
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/kafka/values.yaml30
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/.helmignore21
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/Chart.yaml21
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/OWNERS8
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/README.md105
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/010-ServiceAccount-strimzi-cluster-operator.yaml10
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/020-ClusterRole-strimzi-cluster-operator-role.yaml259
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/020-RoleBinding-strimzi-cluster-operator.yaml25
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/021-ClusterRole-strimzi-cluster-operator-role.yaml21
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/021-ClusterRoleBinding-strimzi-cluster-operator.yaml18
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/030-ClusterRole-strimzi-kafka-broker.yaml17
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/030-ClusterRoleBinding-strimzi-cluster-operator-kafka-broker-delegation.yaml18
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/031-ClusterRole-strimzi-entity-operator.yaml52
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml25
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/032-ClusterRole-strimzi-topic-operator.yaml29
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/032-RoleBinding-strimzi-cluster-operator-topic-operator-delegation.yaml25
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/040-Crd-kafka.yaml2123
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/041-Crd-kafkaconnect.yaml559
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/042-Crd-kafkaconnects2i.yaml561
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/043-Crd-kafkatopic.yaml44
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/044-Crd-kafkauser.yaml100
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/045-Crd-kafkamirrormaker.yaml526
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/050-Deployment-strimzi-cluster-operator.yaml74
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/NOTES.txt5
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/_helpers.tpl49
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/_kafka_image_map.tpl28
-rw-r--r--vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/values.yaml89
-rw-r--r--vnfs/DAaaS/deploy/messaging/values.yaml29
-rw-r--r--vnfs/DAaaS/deploy/minio/.helmignore21
-rwxr-xr-xvnfs/DAaaS/deploy/minio/Chart.yaml22
-rwxr-xr-xvnfs/DAaaS/deploy/minio/README.md330
-rw-r--r--vnfs/DAaaS/deploy/minio/templates/NOTES.txt44
-rwxr-xr-xvnfs/DAaaS/deploy/minio/templates/_helper_create_bucket.txt89
-rw-r--r--vnfs/DAaaS/deploy/minio/templates/_helpers.tpl43
-rw-r--r--vnfs/DAaaS/deploy/minio/templates/configmap.yaml12
-rw-r--r--vnfs/DAaaS/deploy/minio/templates/deployment.yaml195
-rw-r--r--vnfs/DAaaS/deploy/minio/templates/ingress.yaml39
-rw-r--r--vnfs/DAaaS/deploy/minio/templates/networkpolicy.yaml25
-rwxr-xr-xvnfs/DAaaS/deploy/minio/templates/post-install-create-bucket-job.yaml59
-rw-r--r--vnfs/DAaaS/deploy/minio/templates/pvc.yaml27
-rw-r--r--vnfs/DAaaS/deploy/minio/templates/secrets.yaml18
-rw-r--r--vnfs/DAaaS/deploy/minio/templates/service.yaml46
-rw-r--r--vnfs/DAaaS/deploy/minio/templates/statefulset.yaml141
-rwxr-xr-xvnfs/DAaaS/deploy/minio/values.yaml331
-rw-r--r--vnfs/DAaaS/deploy/operator/.helmignore22
-rw-r--r--vnfs/DAaaS/deploy/operator/Chart.yaml5
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/.helmignore21
-rwxr-xr-xvnfs/DAaaS/deploy/operator/charts/etcd-operator/Chart.yaml14
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/OWNERS6
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/README.md158
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/NOTES.txt33
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/_helpers.tpl75
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-etcd-crd.yaml18
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-clusterrole-binding.yaml20
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-deployment.yaml59
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-service-account.yaml12
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/etcd-cluster-crd.yaml25
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-cluster-role.yaml49
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-clusterrole-binding.yaml20
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-deployment.yaml81
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-service-account.yaml12
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-etcd-crd.yaml28
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-clusterrole-binding.yaml20
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-deployment.yaml63
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-service-account.yaml12
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-service.yaml20
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/etcd-operator/values.yaml153
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/m3db-operator/Chart.yaml22
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/m3db-operator/LICENSE201
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/m3db-operator/NOTES.txt12
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/m3db-operator/README.md14
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/cluster_role.yaml35
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/cluster_role_binding.yaml12
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/service_account.yaml5
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/stateful_set.yaml26
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/m3db-operator/values.yaml6
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/.helmignore25
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/Chart.yaml17
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/README.md428
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/NOTES.txt5
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/_helpers.tpl91
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/alertmanager.yaml100
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/ingress.yaml33
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/podDisruptionBudget.yaml20
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp-clusterrole.yaml15
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp-clusterrolebinding.yaml17
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp.yaml48
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/secret.yaml14
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/service.yaml42
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/serviceaccount.yaml11
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/servicemonitor.yaml21
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/exporters/node-exporter/servicemonitor.yaml18
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/configmap-dashboards.yaml23
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/configmaps-datasources.yaml19
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/etcd.yaml1110
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml926
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-coredns.yaml1323
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-node-rsrc-use.yaml953
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-cluster.yaml1338
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-namespace.yaml849
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-pod.yaml876
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/nodes.yaml1328
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/persistentvolumesusage.yaml359
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/pods.yaml500
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/statefulset.yaml873
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/servicemonitor.yaml21
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/cleanup-crds.yaml43
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/clusterrole.yaml71
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/clusterrolebinding.yaml17
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-alertmanager.yaml2477
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-prometheus.yaml3178
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-prometheusrules.yaml360
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-servicemonitor.yaml310
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/deployment.yaml71
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp-clusterrole.yaml15
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp-clusterrolebinding.yaml17
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp.yaml47
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/service.yaml41
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/serviceaccount.yaml11
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/servicemonitor.yaml20
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalAlertRelabelConfigs.yaml11
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalAlertmanagerConfigs.yaml11
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalPrometheusRules.yaml20
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalScrapeConfigs.yaml11
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/clusterrole.yaml35
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/clusterrolebinding.yaml18
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/ingress.yaml33
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/podDisruptionBudget.yaml20
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/prometheus.yaml176
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp-clusterrole.yaml15
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp-clusterrolebinding.yaml18
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp.yaml47
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/role-config.yaml16
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/role-specificNamespace.yaml27
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rolebinding-config.yaml17
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rolebinding-specificNamespace.yaml23
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/alertmanager.rules.yaml50
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/etcd.yaml136
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/general.rules.yaml46
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/k8s.rules.yaml60
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-apiserver.rules.yaml35
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml37
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml37
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-scheduler.rules.yaml59
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-absent.yaml123
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-apps.yaml156
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-resources.yaml99
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-storage.yaml58
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-system.yaml119
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/node.rules.yaml198
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/prometheus-operator.yaml39
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/prometheus.rules.yaml105
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/service.yaml44
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/serviceaccount.yaml11
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/servicemonitor.yaml21
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/servicemonitors.yaml29
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/prometheus-operator/values.yaml1148
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/sparkoperator/.helmignore1
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/sparkoperator/Chart.yaml5
-rwxr-xr-xvnfs/DAaaS/deploy/operator/charts/sparkoperator/README.md42
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/_helpers.tpl48
-rwxr-xr-xvnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-deployment.yaml79
-rwxr-xr-xvnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-rbac.yaml55
-rwxr-xr-xvnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-serviceaccount.yaml11
-rwxr-xr-xvnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-rbac.yaml44
-rwxr-xr-xvnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-serviceaccount.yaml12
-rwxr-xr-xvnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-cleanup-job.yaml32
-rwxr-xr-xvnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-init-job.yaml24
-rwxr-xr-xvnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-service.yaml19
-rw-r--r--vnfs/DAaaS/deploy/operator/charts/sparkoperator/values.yaml28
-rw-r--r--vnfs/DAaaS/deploy/operator/resources/m3db.labels7
-rw-r--r--vnfs/DAaaS/deploy/operator/values.yaml29
-rw-r--r--vnfs/DAaaS/deploy/training-core/.helmignore23
-rw-r--r--vnfs/DAaaS/deploy/training-core/Chart.yaml5
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/.gitignore2
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/.travis.yml20
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/LICENSE201
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/README.md12
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/README.md390
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-client-k8s/Chart.yaml4
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-client-k8s/templates/client-deployment.yaml56
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/.helmignore21
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/Chart.yaml5
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/templates/_helpers.tpl64
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/templates/configmap.yaml197
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-datanode-k8s/Chart.yaml4
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-datanode-k8s/templates/datanode-daemonset.yaml191
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-journalnode-k8s/Chart.yaml4
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-journalnode-k8s/templates/journalnode-statefulset.yaml180
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/.gitignore2
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/.helmignore21
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/Chart.yaml5
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/requirements.yaml59
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/templates/_helpers.tpl264
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/values.yaml248
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/.helmignore21
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/Chart.yaml4
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/templates/statefulset.yaml99
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-namenode-k8s/Chart.yaml4
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-namenode-k8s/templates/namenode-statefulset.yaml287
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-simple-namenode-k8s/Chart.yaml4
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-simple-namenode-k8s/templates/namenode-statefulset.yaml82
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/m3db/.helmignore22
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/m3db/Chart.yaml3
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/m3db/templates/NOTES.txt1
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/m3db/templates/_helpers.tpl32
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/m3db/templates/configmap.yaml216
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/m3db/templates/etcd-cluster.yaml20
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/m3db/templates/m3dbcluster.yaml22
-rw-r--r--vnfs/DAaaS/deploy/training-core/charts/m3db/values.yaml51
-rw-r--r--vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/README.md11
-rw-r--r--vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/pom.xml111
-rw-r--r--vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/CreateKafkaConsumer.java81
-rw-r--r--vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/HdfsWriter.java40
-rw-r--r--vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/Orchestrator.java51
-rw-r--r--vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/config/Configuration.java38
-rw-r--r--vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/kafka2hdfsApp.java14
-rw-r--r--vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/resources/configs.yaml10
-rw-r--r--vnfs/DAaaS/deploy/training-core/values.yaml29
-rw-r--r--vnfs/DAaaS/deploy/visualization/.helmignore22
-rw-r--r--vnfs/DAaaS/deploy/visualization/Chart.yaml5
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/.helmignore22
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/Chart.yaml18
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/README.md240
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/dashboards/custom-dashboard.json1
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/NOTES.txt34
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/_helpers.tpl43
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/clusterrole.yaml23
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/clusterrolebinding.yaml23
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/configmap-dashboard-provider.yaml26
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/configmap.yaml71
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/dashboards-json-configmap.yaml28
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/deployment.yaml358
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/ingress.yaml42
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/podsecuritypolicy.yaml54
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/pvc.yaml24
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/role.yaml31
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/rolebinding.yaml29
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/secret.yaml22
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/service.yaml49
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/templates/serviceaccount.yaml11
-rwxr-xr-xvnfs/DAaaS/deploy/visualization/charts/grafana/values.yaml378
-rw-r--r--vnfs/DAaaS/deploy/visualization/templates/NOTES.txt37
-rw-r--r--vnfs/DAaaS/deploy/visualization/values.yaml54
356 files changed, 39684 insertions, 0 deletions
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/.helmignore b/vnfs/DAaaS/deploy/00-init/gloo/.helmignore
new file mode 100755
index 00000000..08c5989a
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/.helmignore
@@ -0,0 +1,28 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+# template files
+*-template.yaml
+
+# generator files
+*.go
+generate/
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/Chart.yaml b/vnfs/DAaaS/deploy/00-init/gloo/Chart.yaml
new file mode 100755
index 00000000..4f5e9315
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: Gloo Helm chart for Kubernetes
+home: https://gloo.solo.io/
+icon: https://raw.githubusercontent.com/solo-io/gloo/master/docs/img/Gloo-01.png
+name: gloo
+sources:
+- https://github.com/solo-io/gloo
+version: 0.13.18
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/0-namespace.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/0-namespace.yaml
new file mode 100755
index 00000000..92a37f9d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/0-namespace.yaml
@@ -0,0 +1,10 @@
+{{- if .Values.namespace.create -}}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Release.Namespace }}
+ labels:
+ app: gloo
+ annotations:
+ "helm.sh/hook": pre-install
+{{- end}}
\ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/10-ingress-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/10-ingress-deployment.yaml
new file mode 100755
index 00000000..7314b4e3
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/10-ingress-deployment.yaml
@@ -0,0 +1,40 @@
+{{- if or (.Values.ingress.enabled) (.Values.settings.integrations.knative.enabled) }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: ingress
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.ingress.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: ingress
+ template:
+ metadata:
+ labels:
+ gloo: ingress
+ spec:
+ containers:
+ - image: "{{ .Values.ingress.deployment.image.repository }}:{{ .Values.ingress.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.ingress.deployment.image.pullPolicy }}
+ name: ingress
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+{{- if .Values.settings.integrations.knative.enabled }}
+ - name: "ENABLE_KNATIVE_INGRESS"
+ value: "true"
+{{- end }}
+
+{{- if not (.Values.ingress.enabled) }}
+ - name: "DISABLE_KUBE_INGRESS"
+ value: "true"
+{{- end }}
+
+
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/100-gloo-crds.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/100-gloo-crds.yaml
new file mode 100755
index 00000000..2c111170
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/100-gloo-crds.yaml
@@ -0,0 +1,111 @@
+{{- if .Values.crds.create }}
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: settings.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ gloo: settings
+spec:
+ group: gloo.solo.io
+ names:
+ kind: Settings
+ listKind: SettingsList
+ plural: settings
+ shortNames:
+ - st
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: gateways.gateway.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gateway.solo.io
+ names:
+ kind: Gateway
+ listKind: GatewayList
+ plural: gateways
+ shortNames:
+ - gw
+ singular: gateway
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: virtualservices.gateway.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gateway.solo.io
+ names:
+ kind: VirtualService
+ listKind: VirtualServiceList
+ plural: virtualservices
+ shortNames:
+ - vs
+ singular: virtualservice
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: proxies.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gloo.solo.io
+ names:
+ kind: Proxy
+ listKind: ProxyList
+ plural: proxies
+ shortNames:
+ - px
+ singular: proxy
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: upstreams.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gloo.solo.io
+ names:
+ kind: Upstream
+ listKind: UpstreamList
+ plural: upstreams
+ shortNames:
+ - us
+ singular: upstream
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: upstreamgroups.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gloo.solo.io
+ names:
+ kind: UpstreamGroup
+ listKind: UpstreamGroupList
+ plural: upstreamgroups
+ shortNames:
+ - ug
+ singular: upstreamgroup
+ scope: Namespaced
+ version: v1
+---
+{{- end}}
\ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/101-knative-crds-0.5.1.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/101-knative-crds-0.5.1.yaml
new file mode 100755
index 00000000..3c9987ef
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/101-knative-crds-0.5.1.yaml
@@ -0,0 +1,343 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+
+---
+# ↓ required as knative dependency on istio crds is hard-coded right now ↓
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: virtualservices.networking.istio.io
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ app: istio-pilot
+spec:
+ group: networking.istio.io
+ names:
+ kind: VirtualService
+ listKind: VirtualServiceList
+ plural: virtualservices
+ singular: virtualservice
+ categories:
+ - istio-io
+ - networking-istio-io
+ scope: Namespaced
+ version: v1alpha3
+
+# ↑ required as knative dependency on istio crds is hard-coded right now ↑
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: certificates.networking.internal.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.conditions[?(@.type=="Ready")].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=="Ready")].reason
+ name: Reason
+ type: string
+ group: networking.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - networking
+ kind: Certificate
+ plural: certificates
+ shortNames:
+ - kcert
+ singular: certificate
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: clusteringresses.networking.internal.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: networking.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - networking
+ kind: ClusterIngress
+ plural: clusteringresses
+ singular: clusteringress
+ scope: Cluster
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: configurations.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.latestCreatedRevisionName
+ name: LatestCreated
+ type: string
+ - JSONPath: .status.latestReadyRevisionName
+ name: LatestReady
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Configuration
+ plural: configurations
+ shortNames:
+ - config
+ - cfg
+ singular: configuration
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ name: images.caching.internal.knative.dev
+spec:
+ group: caching.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - caching
+ kind: Image
+ plural: images
+ shortNames:
+ - img
+ singular: image
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: podautoscalers.autoscaling.internal.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: autoscaling.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - autoscaling
+ kind: PodAutoscaler
+ plural: podautoscalers
+ shortNames:
+ - kpa
+ singular: podautoscaler
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: revisions.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.serviceName
+ name: Service Name
+ type: string
+ - JSONPath: .metadata.labels['serving\.knative\.dev/configurationGeneration']
+ name: Generation
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Revision
+ plural: revisions
+ shortNames:
+ - rev
+ singular: revision
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: routes.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.domain
+ name: Domain
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Route
+ plural: routes
+ shortNames:
+ - rt
+ singular: route
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: services.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.domain
+ name: Domain
+ type: string
+ - JSONPath: .status.latestCreatedRevisionName
+ name: LatestCreated
+ type: string
+ - JSONPath: .status.latestReadyRevisionName
+ name: LatestReady
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Service
+ plural: services
+ shortNames:
+ - kservice
+ - ksvc
+ singular: service
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: serverlessservices.networking.internal.knative.dev
+spec:
+ group: networking.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - networking
+ kind: ServerlessService
+ plural: serverlessservices
+ shortNames:
+ - sks
+ singular: serverlessservice
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/11-ingress-proxy-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/11-ingress-proxy-deployment.yaml
new file mode 100755
index 00000000..5dc131e5
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/11-ingress-proxy-deployment.yaml
@@ -0,0 +1,65 @@
+{{- if .Values.ingress.enabled }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: ingress-proxy
+ name: ingress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.ingressProxy.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: ingress-proxy
+ template:
+ metadata:
+ labels:
+ gloo: ingress-proxy
+{{- with .Values.ingressProxy.deployment.extraAnnotations }}
+ annotations:
+{{toYaml . | indent 8}}{{- end }}
+ spec:
+ containers:
+ - args: ["--disable-hot-restart"]
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: "{{ .Values.ingressProxy.deployment.image.repository }}:{{ .Values.ingressProxy.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.ingressProxy.deployment.image.pullPolicy }}
+ name: ingress-proxy
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ ports:
+ - containerPort: {{ .Values.ingressProxy.deployment.httpPort }}
+ name: http
+ protocol: TCP
+ - containerPort: {{ .Values.ingressProxy.deployment.httpsPort }}
+ name: https
+ protocol: TCP
+{{- with .Values.ingressProxy.deployment.extraPorts }}
+{{toYaml . | indent 8}}{{- end }}
+ volumeMounts:
+ - mountPath: /etc/envoy
+ name: envoy-config
+ {{- if .Values.ingressProxy.deployment.image.pullSecret }}
+ imagePullSecrets:
+ - name: {{ .Values.ingressProxy.deployment.image.pullSecret }}{{end}}
+ volumes:
+ - configMap:
+ name: ingress-envoy-config
+ name: envoy-config
+
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/12-ingress-proxy-configmap.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/12-ingress-proxy-configmap.yaml
new file mode 100755
index 00000000..8938a477
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/12-ingress-proxy-configmap.yaml
@@ -0,0 +1,52 @@
+{{- if .Values.ingress.enabled }}
+# configmap
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ingress-envoy-config
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: gateway-proxy
+data:
+{{ if (empty .Values.ingressProxy.configMap.data) }}
+ envoy.yaml: |
+ node:
+ cluster: ingress
+ id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+ metadata:
+ # this line must match !
+ role: "{{ "{{" }}.PodNamespace{{ "}}" }}~ingress-proxy"
+ static_resources:
+ clusters:
+ - name: xds_cluster
+ connect_timeout: 5.000s
+ load_assignment:
+ cluster_name: xds_cluster
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: gloo
+ port_value: {{ .Values.gloo.deployment.xdsPort }}
+ http2_protocol_options: {}
+ type: STRICT_DNS
+ dynamic_resources:
+ ads_config:
+ api_type: GRPC
+ grpc_services:
+ - envoy_grpc: {cluster_name: xds_cluster}
+ cds_config:
+ ads: {}
+ lds_config:
+ ads: {}
+ admin:
+ access_log_path: /dev/null
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 19000
+{{- else}}{{ toYaml .Values.ingressProxy.configMap.data | indent 2}}{{- end}}
+
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/13-ingress-proxy-service.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/13-ingress-proxy-service.yaml
new file mode 100755
index 00000000..583e8bcd
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/13-ingress-proxy-service.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.ingress.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: ingress-proxy
+ name: ingress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ ports:
+ - port: {{ .Values.ingressProxy.deployment.httpPort }}
+ protocol: TCP
+ name: http
+ - port: {{ .Values.ingressProxy.deployment.httpsPort }}
+ protocol: TCP
+ name: https
+ selector:
+ gloo: ingress-proxy
+ type: LoadBalancer
+
+
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml
new file mode 100755
index 00000000..fb7874eb
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml
@@ -0,0 +1,58 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: clusteringress-proxy
+ name: clusteringress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.settings.integrations.knative.proxy.replicas }}
+ selector:
+ matchLabels:
+ gloo: clusteringress-proxy
+ template:
+ metadata:
+ labels:
+ gloo: clusteringress-proxy
+ spec:
+ containers:
+ - args: ["--disable-hot-restart"]
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: {{ .Values.settings.integrations.knative.proxy.image.repository }}:{{ .Values.settings.integrations.knative.proxy.image.tag }}
+ imagePullPolicy: {{ .Values.settings.integrations.knative.proxy.image.pullPolicy }}
+ name: clusteringress-proxy
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ ports:
+ - containerPort: {{ .Values.settings.integrations.knative.proxy.httpPort }}
+ name: http
+ protocol: TCP
+ - containerPort: {{ .Values.settings.integrations.knative.proxy.httpsPort }}
+ name: https
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /etc/envoy
+ name: envoy-config
+ volumes:
+ - configMap:
+ name: clusteringress-envoy-config
+ name: envoy-config
+
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml
new file mode 100755
index 00000000..85a6421f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml
@@ -0,0 +1,49 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+# configmap
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: clusteringress-envoy-config
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: clusteringress-proxy
+data:
+ envoy.yaml: |
+ node:
+ cluster: clusteringress
+ id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+ metadata:
+ # this line must match !
+ role: "{{ "{{" }}.PodNamespace{{ "}}" }}~clusteringress-proxy"
+ static_resources:
+ clusters:
+ - name: xds_cluster
+ connect_timeout: 5.000s
+ load_assignment:
+ cluster_name: xds_cluster
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: gloo
+ port_value: {{ .Values.gloo.deployment.xdsPort }}
+ http2_protocol_options: {}
+ type: STRICT_DNS
+ dynamic_resources:
+ ads_config:
+ api_type: GRPC
+ grpc_services:
+ - envoy_grpc: {cluster_name: xds_cluster}
+ cds_config:
+ ads: {}
+ lds_config:
+ ads: {}
+ admin:
+ access_log_path: /dev/null
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 19000
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/16-clusteringress-proxy-service.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/16-clusteringress-proxy-service.yaml
new file mode 100755
index 00000000..7e25bee9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/16-clusteringress-proxy-service.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: clusteringress-proxy
+ name: clusteringress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ ports:
+ - port: {{ .Values.settings.integrations.knative.proxy.httpPort }}
+ protocol: TCP
+ name: http
+ - port: {{ .Values.settings.integrations.knative.proxy.httpsPort }}
+ protocol: TCP
+ name: https
+ selector:
+ gloo: clusteringress-proxy
+ type: LoadBalancer
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml
new file mode 100755
index 00000000..a73cf1f2
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml
@@ -0,0 +1,982 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app: gloo
+ istio-injection: enabled
+ serving.knative.dev/release: devel
+ name: knative-serving
+
+---
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ serving.knative.dev/controller: "true"
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: knative-serving-admin
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ serving.knative.dev/controller: "true"
+ serving.knative.dev/release: devel
+ name: knative-serving-core
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - namespaces
+ - secrets
+ - configmaps
+ - endpoints
+ - services
+ - events
+ - serviceaccounts
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - ingresses
+ - deployments
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ - deployments/scale
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - serving.knative.dev
+ resources:
+ - configurations
+ - routes
+ - revisions
+ - services
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - serving.knative.dev
+ resources:
+ - configurations/status
+ - routes/status
+ - revisions/status
+ - services/status
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - autoscaling.internal.knative.dev
+ resources:
+ - podautoscalers
+ - podautoscalers/status
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - caching.internal.knative.dev
+ resources:
+ - images
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - networking.internal.knative.dev
+ resources:
+ - clusteringresses
+ - clusteringresses/status
+ - serverlessservices
+ - serverlessservices/status
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - deletecollection
+ - patch
+ - watch
+ - apiGroups:
+ - networking.istio.io
+ resources:
+ - virtualservices
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: controller
+ namespace: knative-serving
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: knative-serving-controller-admin
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: knative-serving-admin
+subjects:
+ - kind: ServiceAccount
+ name: controller
+ namespace: knative-serving
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: activator
+ serving.knative.dev/release: devel
+ name: activator-service
+ namespace: knative-serving
+spec:
+ ports:
+ - name: http
+ nodePort: null
+ port: 80
+ protocol: TCP
+ targetPort: 8080
+ - name: http2
+ port: 81
+ protocol: TCP
+ targetPort: 8081
+ - name: metrics
+ nodePort: null
+ port: 9090
+ protocol: TCP
+ targetPort: 9090
+ selector:
+ app: activator
+ type: ClusterIP
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: controller
+ serving.knative.dev/release: devel
+ name: controller
+ namespace: knative-serving
+spec:
+ ports:
+ - name: metrics
+ port: 9090
+ protocol: TCP
+ targetPort: 9090
+ selector:
+ app: controller
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ role: webhook
+ serving.knative.dev/release: devel
+ name: webhook
+ namespace: knative-serving
+spec:
+ ports:
+ - port: 443
+ targetPort: 443
+ selector:
+ role: webhook
+
+---
+apiVersion: caching.internal.knative.dev/v1alpha1
+kind: Image
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: queue-proxy
+ namespace: knative-serving
+spec:
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/queue@sha256:b5c759e4ea6f36ae4498c1ec794653920345b9ad7492731fb1d6087e3b95dc43
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: activator
+ namespace: knative-serving
+spec:
+ selector:
+ matchLabels:
+ app: activator
+ role: activator
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "true"
+ labels:
+ app: activator
+ role: activator
+ serving.knative.dev/release: devel
+ spec:
+ containers:
+ - args:
+ - -logtostderr=false
+ - -stderrthreshold=FATAL
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/activator@sha256:60630ac88d8cb67debd1e2ab1ecd6ec3ff6cbab2336dda8e7ae1c01ebead76c0
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8080
+ name: activator
+ ports:
+ - containerPort: 8080
+ name: http1-port
+ - containerPort: 8081
+ name: h2c-port
+ - containerPort: 9090
+ name: metrics-port
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: 8080
+ resources:
+ limits:
+ cpu: 200m
+ memory: 600Mi
+ requests:
+ cpu: 20m
+ memory: 60Mi
+ volumeMounts:
+ - mountPath: /etc/config-logging
+ name: config-logging
+ - mountPath: /etc/config-observability
+ name: config-observability
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-logging
+ name: config-logging
+ - configMap:
+ name: config-observability
+ name: config-observability
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: autoscaler
+ serving.knative.dev/release: devel
+ name: autoscaler
+ namespace: knative-serving
+spec:
+ ports:
+ - name: http
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: metrics
+ port: 9090
+ protocol: TCP
+ targetPort: 9090
+ selector:
+ app: autoscaler
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: autoscaler
+ namespace: knative-serving
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: autoscaler
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "true"
+ labels:
+ app: autoscaler
+ spec:
+ containers:
+ - env:
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/autoscaler@sha256:442f99e3a55653b19137b44c1d00f681b594d322cb39c1297820eb717e2134ba
+ name: autoscaler
+ ports:
+ - containerPort: 8080
+ name: websocket
+ - containerPort: 9090
+ name: metrics
+ resources:
+ limits:
+ cpu: 300m
+ memory: 400Mi
+ requests:
+ cpu: 30m
+ memory: 40Mi
+ volumeMounts:
+ - mountPath: /etc/config-autoscaler
+ name: config-autoscaler
+ - mountPath: /etc/config-logging
+ name: config-logging
+ - mountPath: /etc/config-observability
+ name: config-observability
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-autoscaler
+ name: config-autoscaler
+ - configMap:
+ name: config-logging
+ name: config-logging
+ - configMap:
+ name: config-observability
+ name: config-observability
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # The Revision ContainerConcurrency field specifies the maximum number
+ # of requests the Container can handle at once. Container concurrency
+ # target percentage is how much of that maximum to use in a stable
+ # state. E.g. if a Revision specifies ContainerConcurrency of 10, then
+ # the Autoscaler will try to maintain 7 concurrent connections per pod
+ # on average. A value of 0.7 is chosen because the Autoscaler panics
+ # when concurrency exceeds 2x the desired set point. So we will panic
+ # before we reach the limit.
+ container-concurrency-target-percentage: "1.0"
+
+ # The container concurrency target default is what the Autoscaler will
+ # try to maintain when the Revision specifies unlimited concurrency.
+ # Even when specifying unlimited concurrency, the autoscaler will
+ # horizontally scale the application based on this target concurrency.
+ #
+ # A value of 100 is chosen because it's enough to allow vertical pod
+ # autoscaling to tune resource requests. E.g. maintaining 1 concurrent
+ # "hello world" request doesn't consume enough resources to allow VPA
+ # to achieve efficient resource usage (VPA CPU minimum is 300m).
+ container-concurrency-target-default: "100"
+
+ # When operating in a stable mode, the autoscaler operates on the
+ # average concurrency over the stable window.
+ stable-window: "60s"
+
+ # When observed average concurrency during the panic window reaches 2x
+ # the target concurrency, the autoscaler enters panic mode. When
+ # operating in panic mode, the autoscaler operates on the average
+ # concurrency over the panic window.
+ panic-window: "6s"
+
+ # Max scale up rate limits the rate at which the autoscaler will
+ # increase pod count. It is the maximum ratio of desired pods versus
+ # observed pods.
+ max-scale-up-rate: "10"
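+
+ # A quick illustrative calculation with the numbers above (not functional
+ # configuration): a Revision with ContainerConcurrency=10 and a target
+ # percentage of 1.0 gives a per-pod target of 10 concurrent requests, so a
+ # stable-window average of 25 concurrent requests yields ceil(25/10) = 3
+ # desired pods; panic mode starts once the panic-window average exceeds
+ # 2 x 10 = 20, and max-scale-up-rate caps any single step at 10x the
+ # currently observed pod count.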
+
+ # Scale to zero feature flag
+ enable-scale-to-zero: "true"
+
+ # Tick interval is the time between autoscaling calculations.
+ tick-interval: "2s"
+
+ # Dynamic parameters (take effect when config map is updated):
+
+ # Scale to zero grace period is the time an inactive revision is left
+ # running before it is scaled to zero (min: 30s).
+ scale-to-zero-grace-period: "30s"
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-autoscaler
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # List of repositories for which tag to digest resolving should be skipped
+ registriesSkippingTagResolving: "ko.local,dev.local"
+ queueSidecarImage: gcr.io/knative-releases/github.com/knative/serving/cmd/queue@sha256:b5c759e4ea6f36ae4498c1ec794653920345b9ad7492731fb1d6087e3b95dc43
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-controller
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # revision-timeout-seconds contains the default number of
+ # seconds to use for the revision's per-request timeout, if
+ # none is specified.
+ revision-timeout-seconds: "300" # 5 minutes
+
+ # revision-cpu-request contains the cpu allocation to assign
+ # to revisions by default.
+ revision-cpu-request: "400m" # 0.4 of a CPU (aka 400 milli-CPU)
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-defaults
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # Default value for domain.
+ # Although it will match all routes, it is the least-specific rule so it
+ # will only be used if no other domain matches.
+ example.com: |
+
+ # These are example settings of domain.
+ # example.org will be used for routes having app=nonprofit.
+ example.org: |
+ selector:
+ app: nonprofit
+
+ # Routes having domain suffix of 'svc.cluster.local' will not be exposed
+ # through Ingress. You can define your own label selector to assign that
+ # domain suffix to your Route here, or you can set the label
+ # "serving.knative.dev/visibility=cluster-local"
+ # to achieve the same effect. This shows how to make routes having
+ # the label app=secret only exposed to the local cluster.
+ svc.cluster.local: |
+ selector:
+ app: secret
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-domain
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # Delay after revision creation before considering it for GC
+ stale-revision-create-delay: "24h"
+
+ # Duration since a route has been pointed at a revision before it should be GC'd
+ # This minus lastpinned-debounce must be longer than the controller resync period (10 hours)
+ stale-revision-timeout: "15h"
+
+ # Minimum number of generations of revisions to keep before considering for GC
+ stale-revision-minimum-generations: "1"
+
+ # To avoid constant updates, we allow an existing annotation to be stale by this
+ # amount before we update the timestamp
+ stale-revision-lastpinned-debounce: "5h"
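+
+ # Sanity check with the defaults above: stale-revision-timeout (15h) minus
+ # stale-revision-lastpinned-debounce (5h) leaves exactly the 10 hour resync
+ # period, so raise the timeout if you ever increase the debounce.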
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-gc
+ namespace: knative-serving
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ labels:
+ networking.knative.dev/ingress-provider: istio
+ serving.knative.dev/release: devel
+ name: config-istio
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # Common configuration for all Knative codebase
+ zap-logger-config: |
+ {
+ "level": "info",
+ "development": false,
+ "outputPaths": ["stdout"],
+ "errorOutputPaths": ["stderr"],
+ "encoding": "json",
+ "encoderConfig": {
+ "timeKey": "ts",
+ "levelKey": "level",
+ "nameKey": "logger",
+ "callerKey": "caller",
+ "messageKey": "msg",
+ "stacktraceKey": "stacktrace",
+ "lineEnding": "",
+ "levelEncoder": "",
+ "timeEncoder": "iso8601",
+ "durationEncoder": "",
+ "callerEncoder": ""
+ }
+ }
+
+ # Log level overrides
+ # For all components except the autoscaler and queue proxy,
+ # changes are picked up immediately.
+ # For autoscaler and queue proxy, changes require recreation of the pods.
+ loglevel.controller: "info"
+ loglevel.autoscaler: "info"
+ loglevel.queueproxy: "info"
+ loglevel.webhook: "info"
+ loglevel.activator: "info"
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-logging
+ namespace: knative-serving
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-network
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # logging.enable-var-log-collection defaults to false.
+ # A fluentd sidecar will be set up to collect var log if
+ # this flag is true.
+ logging.enable-var-log-collection: false
+
+ # logging.fluentd-sidecar-image provides the fluentd sidecar image
+ # to inject as a sidecar to collect logs from /var/log.
+ # Must be provided if logging.enable-var-log-collection is true.
+ logging.fluentd-sidecar-image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4
+
+ # logging.fluentd-sidecar-output-config provides the configuration
+ # for the fluentd sidecar, which will be placed into a configmap and
+ # mounted into the fluentd sidecar image.
+ logging.fluentd-sidecar-output-config: |
+ # Parse json log before sending to Elastic Search
+ <filter **>
+ @type parser
+ key_name log
+ <parse>
+ @type multi_format
+ <pattern>
+ format json
+ time_key fluentd-time # fluentd-time is reserved for structured logs
+ time_format %Y-%m-%dT%H:%M:%S.%NZ
+ </pattern>
+ <pattern>
+ format none
+ message_key log
+ </pattern>
+ </parse>
+ </filter>
+ # Send to Elastic Search
+ <match **>
+ @id elasticsearch
+ @type elasticsearch
+ @log_level info
+ include_tag_key true
+ # Elasticsearch service is in monitoring namespace.
+ host elasticsearch-logging.knative-monitoring
+ port 9200
+ logstash_format true
+ <buffer>
+ @type file
+ path /var/log/fluentd-buffers/kubernetes.system.buffer
+ flush_mode interval
+ retry_type exponential_backoff
+ flush_thread_count 2
+ flush_interval 5s
+ retry_forever
+ retry_max_interval 30
+ chunk_limit_size 2M
+ queue_limit_length 8
+ overflow_action block
+ </buffer>
+ </match>
+
+ # logging.revision-url-template provides a template to use for producing the
+ # logging URL that is injected into the status of each Revision.
+ # This value is what you might use with the Knative monitoring bundle; it provides
+ # access to Kibana after setting up kubectl proxy.
+ logging.revision-url-template: |
+ http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase))))
+
+ # If non-empty, this enables queue proxy writing request logs to stdout.
+ # The value determines the shape of the request logs and it must be a valid go text/template.
+ # It is important to keep this as a single line. Multiple lines are parsed as separate entities
+ # by most collection agents and will split the request logs into multiple records.
+ #
+ # The following fields and functions are available to the template:
+ #
+ # Request: An http.Request (see https://golang.org/pkg/net/http/#Request)
+ # representing an HTTP request received by the server.
+ #
+ # Response:
+ # struct {
+ # Code int // HTTP status code (see https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml)
+ # Size int // An int representing the size of the response.
+ # Latency float64 // A float64 representing the latency of the response in seconds.
+ # }
+ #
+ # Revision:
+ # struct {
+ # Name string // Knative revision name
+ # Namespace string // Knative revision namespace
+ # Service string // Knative service name
+ # Configuration string // Knative configuration name
+ # PodName string // Name of the pod hosting the revision
+ # PodIP string // IP of the pod hosting the revision
+ # }
+ #
+ logging.request-log-template: '{"httpRequest": {"requestMethod": "{{ "{{" }}.Request.Method{{ "}}" }}", "requestUrl": "{{ "{{" }}js .Request.RequestURI{{ "}}" }}", "requestSize": "{{ "{{" }}.Request.ContentLength{{ "}}" }}", "status": {{ "{{" }}.Response.Code{{ "}}" }}, "responseSize": "{{ "{{" }}.Response.Size{{ "}}" }}", "userAgent": "{{ "{{" }}js .Request.UserAgent{{ "}}" }}", "remoteIp": "{{ "{{" }}js .Request.RemoteAddr{{ "}}" }}", "serverIp": "{{ "{{" }}.Revision.PodIP{{ "}}" }}", "referer": "{{ "{{" }}js .Request.Referer{{ "}}" }}", "latency": "{{ "{{" }}.Response.Latency{{ "}}" }}s", "protocol": "{{ "{{" }}.Request.Proto{{ "}}" }}"}, "traceId": "{{ "{{" }}index .Request.Header "X-B3-Traceid"{{ "}}" }}"}'
+
+ # metrics.backend-destination field specifies the system metrics destination.
+ # It supports either prometheus (the default) or stackdriver.
+ # Note: Using stackdriver will incur additional charges
+ metrics.backend-destination: prometheus
+
+ # metrics.request-metrics-backend-destination specifies the request metrics
+ # destination. If non-empty, it enables queue proxy to send request metrics.
+ # Currently supported values: prometheus, stackdriver.
+ metrics.request-metrics-backend-destination: prometheus
+
+ # metrics.stackdriver-project-id field specifies the stackdriver project ID. This
+ # field is optional. When running on GCE, application default credentials will be
+ # used if this field is not provided.
+ metrics.stackdriver-project-id: "<your stackdriver project id>"
+
+ # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed to send metrics to
+ # Stackdriver using "global" resource type and custom metric type if the
+ # metrics are not supported by "knative_revision" resource type. Setting this
+ # flag to "true" could cause extra Stackdriver charge.
+ # If metrics.backend-destination is not Stackdriver, this is ignored.
+ metrics.allow-stackdriver-custom-metrics: "false"
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-observability
+ namespace: knative-serving
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: controller
+ namespace: knative-serving
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: controller
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "false"
+ labels:
+ app: controller
+ spec:
+ containers:
+ - env:
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/controller@sha256:25af5f3adad8b65db3126e0d6e90aa36835c124c24d9d72ffbdd7ee739a7f571
+ name: controller
+ ports:
+ - containerPort: 9090
+ name: metrics
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 1000Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ volumeMounts:
+ - mountPath: /etc/config-logging
+ name: config-logging
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-logging
+ name: config-logging
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: webhook
+ namespace: knative-serving
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: webhook
+ role: webhook
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "false"
+ labels:
+ app: webhook
+ role: webhook
+ spec:
+ containers:
+ - env:
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/webhook@sha256:d1ba3e2c0d739084ff508629db001619cea9cc8780685e85dd910363774eaef6
+ name: webhook
+ resources:
+ limits:
+ cpu: 200m
+ memory: 200Mi
+ requests:
+ cpu: 20m
+ memory: 20Mi
+ volumeMounts:
+ - mountPath: /etc/config-logging
+ name: config-logging
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-logging
+ name: config-logging
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/18-settings.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/18-settings.yaml
new file mode 100755
index 00000000..a2eec087
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/18-settings.yaml
@@ -0,0 +1,30 @@
+{{ if .Values.settings.create }}
+
+apiVersion: gloo.solo.io/v1
+kind: Settings
+metadata:
+ name: default
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ "helm.sh/hook": pre-install
+spec:
+ bindAddr: 0.0.0.0:{{ .Values.gloo.deployment.xdsPort }}
+ discoveryNamespace: {{ .Values.settings.writeNamespace }}
+ kubernetesArtifactSource: {}
+ kubernetesConfigSource: {}
+ kubernetesSecretSource: {}
+ refreshRate: 60s
+
+{{- if .Values.settings.extensions }}
+ extensions:
+{{- toYaml .Values.settings.extensions | nindent 4 }}
+{{- end }}
+
+{{- with .Values.settings.watchNamespaces }}
+ watchNamespaces:
+ {{- range . }}
+ - {{ . }}
+ {{- end }}
+{{- end }}
+
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml
new file mode 100755
index 00000000..35fb5eb0
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.gateway.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-gateway
+ labels:
+ app: gloo
+ gloo: rbac
+rules:
+- apiGroups: [""]
+ resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+ resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+ verbs: ["*"]
+- apiGroups: ["gateway.solo.io"]
+ resources: ["virtualservices", "gateways"]
+ verbs: ["*"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml
new file mode 100755
index 00000000..15215b9f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.ingress.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-ingress
+ labels:
+ app: gloo
+ gloo: rbac
+rules:
+- apiGroups: [""]
+ resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+ resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+ verbs: ["*"]
+- apiGroups: ["extensions", ""]
+ resources: ["ingresses"]
+ verbs: ["*"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml
new file mode 100755
index 00000000..1bd2b95d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.settings.integrations.knative.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-knative
+ labels:
+ app: gloo
+ gloo: rbac
+rules:
+- apiGroups: [""]
+ resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+ resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+ verbs: ["*"]
+- apiGroups: ["networking.internal.knative.dev"]
+ resources: ["clusteringresses"]
+ verbs: ["get", "list", "watch"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml
new file mode 100755
index 00000000..62198913
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.gateway.enabled }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-binding-gateway-{{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: rbac
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: gloo-role-gateway
+ apiGroup: rbac.authorization.k8s.io
+
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml
new file mode 100755
index 00000000..7ef5cbae
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.ingress.enabled }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-binding-ingress-{{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: rbac
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: gloo-role-ingress
+ apiGroup: rbac.authorization.k8s.io
+
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml
new file mode 100755
index 00000000..5f05de96
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.settings.integrations.knative.enabled }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-binding-knative-{{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: rbac
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: gloo-role-knative
+ apiGroup: rbac.authorization.k8s.io
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/3-gloo-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/3-gloo-deployment.yaml
new file mode 100755
index 00000000..b3d8423f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/3-gloo-deployment.yaml
@@ -0,0 +1,57 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: gloo
+ name: gloo
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.gloo.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: gloo
+ template:
+ metadata:
+ labels:
+ gloo: gloo
+ {{- if .Values.gloo.deployment.stats }}
+ annotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "9091"
+ prometheus.io/scrape: "true"
+ {{- end}}
+ spec:
+ containers:
+ - image: "{{ .Values.gloo.deployment.image.repository }}:{{ .Values.gloo.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.gloo.deployment.image.pullPolicy }}
+ name: gloo
+ resources:
+ requests:
+ cpu: 1
+ memory: 256Mi
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 10101
+ capabilities:
+ drop:
+ - ALL
+ ports:
+ - containerPort: {{ .Values.gloo.deployment.xdsPort }}
+ name: grpc
+ protocol: TCP
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.gloo.deployment.stats }}
+ - name: START_STATS_SERVER
+ value: "true"
+ {{- end}}
+ {{- if .Values.gloo.deployment.image.pullSecret }}
+ imagePullSecrets:
+ - name: {{ .Values.gloo.deployment.image.pullSecret }}{{end}}
+
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/4-gloo-service.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/4-gloo-service.yaml
new file mode 100755
index 00000000..ab49ea3f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/4-gloo-service.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: gloo
+ name: gloo
+ namespace: {{ .Release.Namespace }}
+spec:
+{{ if .Values.gloo.deployment.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ .Values.gloo.deployment.externalTrafficPolicy }}
+{{- end }}
+ ports:
+ - name: grpc
+ port: {{ .Values.gloo.deployment.xdsPort }}
+ protocol: TCP
+ selector:
+ gloo: gloo
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/5-discovery-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/5-discovery-deployment.yaml
new file mode 100755
index 00000000..1a44e922
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/5-discovery-deployment.yaml
@@ -0,0 +1,46 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: discovery
+ name: discovery
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.discovery.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: discovery
+ template:
+ metadata:
+ labels:
+ gloo: discovery
+ {{- if .Values.discovery.deployment.stats }}
+ annotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "9091"
+ prometheus.io/scrape: "true"
+ {{- end}}
+ spec:
+ containers:
+ - image: "{{ .Values.discovery.deployment.image.repository }}:{{ .Values.discovery.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.discovery.deployment.image.pullPolicy }}
+ name: discovery
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 10101
+ capabilities:
+ drop:
+ - ALL
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.discovery.deployment.stats }}
+ - name: START_STATS_SERVER
+ value: "true"
+ {{- end}}
+
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/6-gateway-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/6-gateway-deployment.yaml
new file mode 100755
index 00000000..0a32241e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/6-gateway-deployment.yaml
@@ -0,0 +1,47 @@
+{{- if .Values.gateway.enabled }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: gateway
+ name: gateway
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.gateway.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: gateway
+ template:
+ metadata:
+ labels:
+ gloo: gateway
+ {{- if .Values.gateway.deployment.stats }}
+ annotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "9091"
+ prometheus.io/scrape: "true"
+ {{- end}}
+ spec:
+ containers:
+ - image: "{{ .Values.gateway.deployment.image.repository }}:{{ .Values.gateway.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.gateway.deployment.image.pullPolicy }}
+ name: gateway
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 10101
+ capabilities:
+ drop:
+ - ALL
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.gateway.deployment.stats }}
+ - name: START_STATS_SERVER
+ value: "true"
+ {{- end}}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/7-gateway-proxy-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/7-gateway-proxy-deployment.yaml
new file mode 100755
index 00000000..bb54e8f3
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/7-gateway-proxy-deployment.yaml
@@ -0,0 +1,67 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: {{ $key }}
+ name: {{ $key }}
+ namespace: {{ $.Release.Namespace }}
+spec:
+ replicas: {{ $spec.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: {{ $key }}
+ template:
+ metadata:
+ labels:
+ gloo: {{ $key }}
+{{- with $spec.deployment.extraAnnotations }}
+ annotations:
+{{toYaml . | indent 8}}{{- end }}
+ spec:
+ containers:
+ - args: ["--disable-hot-restart"]
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: {{ $spec.deployment.image.repository }}:{{ $spec.deployment.image.tag }}
+ imagePullPolicy: {{ $spec.deployment.image.pullPolicy }}
+ name: gateway-proxy
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ ports:
+ - containerPort: {{ $spec.deployment.httpPort }}
+ name: http
+ protocol: TCP
+ - containerPort: {{ $spec.deployment.httpsPort }}
+ name: https
+ protocol: TCP
+{{- with $spec.deployment.extraPorts }}
+{{toYaml . | indent 8}}{{- end }}
+ volumeMounts:
+ - mountPath: /etc/envoy
+ name: envoy-config
+ {{- if $spec.deployment.image.pullSecret }}
+ imagePullSecrets:
+ - name: {{ $spec.deployment.image.pullSecret }}{{end}}
+ volumes:
+ - configMap:
+ name: {{ $key }}-envoy-config
+ name: envoy-config
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/8-gateway-proxy-service.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/8-gateway-proxy-service.yaml
new file mode 100755
index 00000000..f0b7d347
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/8-gateway-proxy-service.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: {{ $key }}
+ name: {{ $key }}
+ namespace: {{ $.Release.Namespace }}
+ {{- with $spec.service.extraAnnotations }}
+ annotations:
+{{toYaml . | indent 8}}{{- end }}
+spec:
+ ports:
+ - port: {{ $spec.service.httpPort }}
+ targetPort: {{ $spec.deployment.httpPort }}
+ protocol: TCP
+ name: http
+ - port: {{ $spec.service.httpsPort }}
+ targetPort: {{ $spec.deployment.httpsPort }}
+ protocol: TCP
+ name: https
+ selector:
+ gloo: {{ $key }}
+ type: {{ $spec.service.type }}
+ {{- if and (eq $spec.service.type "ClusterIP") $spec.service.clusterIP }}
+ clusterIP: {{ $spec.service.clusterIP }}
+ {{- end }}
+ {{- if and (eq $spec.service.type "LoadBalancer") $spec.service.loadBalancerIP }}
+ loadBalancerIP: {{ $spec.service.loadBalancerIP }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/9-gateway-proxy-configmap.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/9-gateway-proxy-configmap.yaml
new file mode 100755
index 00000000..03c5a920
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/9-gateway-proxy-configmap.yaml
@@ -0,0 +1,54 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+# config_map
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ $key }}-envoy-config
+ namespace: {{ $.Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: {{ $key }}
+data:
+{{ if (empty $spec.configMap.data) }}
+ envoy.yaml: |
+ node:
+ cluster: gateway
+ id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+ metadata:
+ # this role must match the proxy role that the gloo control plane expects
+ role: "{{ "{{" }}.PodNamespace{{ "}}" }}~gateway-proxy"
+ static_resources:
+ clusters:
+ - name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}
+ connect_timeout: 5.000s
+ load_assignment:
+ cluster_name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: gloo.{{ $.Release.Namespace }}.svc.cluster.local
+ port_value: {{ $.Values.gloo.deployment.xdsPort }}
+ http2_protocol_options: {}
+ type: STRICT_DNS
+ dynamic_resources:
+ ads_config:
+ api_type: GRPC
+ grpc_services:
+ - envoy_grpc: {cluster_name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}}
+ cds_config:
+ ads: {}
+ lds_config:
+ ads: {}
+ admin:
+ access_log_path: /dev/null
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 19000
+{{- else}}{{ toYaml $spec.configMap.data | indent 2}}{{- end}}
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/values-ingress.yaml b/vnfs/DAaaS/deploy/00-init/gloo/values-ingress.yaml
new file mode 100755
index 00000000..98dd42ae
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/values-ingress.yaml
@@ -0,0 +1,74 @@
+crds:
+ create: true
+discovery:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/discovery
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+gateway:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gateway
+ tag: ""
+ replicas: 1
+ stats: false
+ enabled: false
+gatewayProxies:
+ gateway-proxy:
+ configMap:
+ data: null
+ deployment:
+ httpPort: "8080"
+ httpsPort: "8443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: ""
+ replicas: 1
+ stats: false
+ service:
+ httpPort: "80"
+ httpsPort: "443"
+ type: LoadBalancer
+gloo:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ xdsPort: "9977"
+ingress:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/ingress
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ enabled: true
+ingressProxy:
+ configMap: {}
+ deployment:
+ httpPort: "80"
+ httpsPort: "443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+namespace:
+ create: false
+rbac:
+ create: true
+settings:
+ integrations:
+ knative:
+ enabled: false
+ writeNamespace: gloo-system
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/values-knative.yaml b/vnfs/DAaaS/deploy/00-init/gloo/values-knative.yaml
new file mode 100755
index 00000000..c53ca1a9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/values-knative.yaml
@@ -0,0 +1,72 @@
+crds:
+ create: true
+discovery:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/discovery
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+gateway:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gateway
+ tag: ""
+ replicas: 1
+ stats: false
+ enabled: false
+gatewayProxies:
+ gateway-proxy:
+ configMap:
+ data: null
+ deployment:
+ httpPort: "8080"
+ httpsPort: "8443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: ""
+ replicas: 1
+ stats: false
+ service:
+ httpPort: "80"
+ httpsPort: "443"
+ type: LoadBalancer
+gloo:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ xdsPort: "9977"
+ingress:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/ingress
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ enabled: false
+namespace:
+ create: false
+rbac:
+ create: true
+settings:
+ integrations:
+ knative:
+ enabled: true
+ proxy:
+ httpPort: "80"
+ httpsPort: "443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ writeNamespace: gloo-system
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/values.yaml b/vnfs/DAaaS/deploy/00-init/gloo/values.yaml
new file mode 100755
index 00000000..daeab0c3
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/values.yaml
@@ -0,0 +1,56 @@
+crds:
+ create: true
+discovery:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/discovery
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+gateway:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gateway
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ enabled: true
+gatewayProxies:
+ gateway-proxy:
+ configMap:
+ data: null
+ deployment:
+ httpPort: "8080"
+ httpsPort: "8443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ service:
+ httpPort: "80"
+ httpsPort: "443"
+ type: LoadBalancer
+gloo:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ xdsPort: "9977"
+ingress:
+ enabled: false
+namespace:
+ create: false
+rbac:
+ create: true
+settings:
+ integrations:
+ knative:
+ enabled: false
+ writeNamespace: gloo-system
diff --git a/vnfs/DAaaS/deploy/00-init/istio/README.md b/vnfs/DAaaS/deploy/00-init/istio/README.md
new file mode 100644
index 00000000..d19bcce0
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/istio/README.md
@@ -0,0 +1,31 @@
+Istio Installation
+
+1. Download the Istio source code:
+   curl -L https://git.io/getLatestIstio | ISTIO_VERSION=1.1.7 sh -
+
+2. Add the Istio Helm chart repository:
+   helm repo add istio.io https://storage.googleapis.com/istio-release/releases/1.1.7/charts/
+
+   NOTE: Make sure both the Helm client and the Helm server (Tiller) are installed.
+
+   Create the namespace istio-system, where all the Istio components will be installed:
+   kubectl create namespace istio-system
+
+3. Install all the Istio Custom Resource Definitions (CRDs) using kubectl apply:
+
+   helm template install/kubernetes/helm/istio-init --name istio-init --namespace istio-system | kubectl apply -f -
+
+4. Verify that all 53 Istio CRDs were committed to the Kubernetes api-server:
+
+   kubectl get crds | grep 'istio.io\|certmanager.k8s.io' | wc -l
+
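+   The command prints the number of matching CRDs; once registration has finished the
+   expected output is 53, for example:
+
+   $ kubectl get crds | grep 'istio.io\|certmanager.k8s.io' | wc -l
+   53
+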
+5. Install Istio with SDS as the configuration profile:
+
+   helm template install/kubernetes/helm/istio --name istio --namespace istio-system --values install/kubernetes/helm/istio/values-istio-sds-auth.yaml | kubectl apply -f -
+
+6. Verify the installation:
+
+   kubectl get svc -n istio-system && kubectl get pods -n istio-system
+
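+   Optionally, label any namespace that should get automatic Envoy sidecar injection
+   (the knative-serving namespace created by the gloo chart already carries this label):
+
+   kubectl label namespace <target-namespace> istio-injection=enabled
+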
+References:
+1. https://istio.io/docs/setup/kubernetes/install/helm/
+2. https://istio.io/docs/tasks/security/auth-sds/
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/Chart.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/Chart.yaml
new file mode 100644
index 00000000..21e90098
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: File, Block, and Object Storage Services for your Cloud-Native Environment
+name: rook-ceph
+version: 0.0.1
+icon: https://rook.io/images/logos/rook/rook-logo-color-on-transparent.png
+sources:
+ - https://github.com/rook/rook
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/NOTES.txt b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/NOTES.txt
new file mode 100644
index 00000000..0509b574
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/NOTES.txt
@@ -0,0 +1,5 @@
+The Rook Operator has been installed. Check its status by running:
+ kubectl --namespace {{ .Release.Namespace }} get pods -l "app=rook-ceph-operator"
+
+Visit https://rook.io/docs/rook/master for instructions on how
+to create & configure Rook clusters
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/_helpers.tpl b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/_helpers.tpl
new file mode 100644
index 00000000..f0d83d2e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/_helpers.tpl
@@ -0,0 +1,16 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
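+
+{{/*
+Illustrative usage of the helpers above from a template (sketch only):
+  metadata:
+    name: {{ template "fullname" . }}
+*/}}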
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/cluster.yml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/cluster.yml
new file mode 100644
index 00000000..1cd33e8c
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/cluster.yml
@@ -0,0 +1,180 @@
+#################################################################################
+# This example first defines some necessary namespace and RBAC security objects.
+# The actual Ceph Cluster CRD example can be found at the bottom of this example.
+#################################################################################
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph
+rules:
+- apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: [ "get", "list", "watch", "create", "update", "delete" ]
+---
+# Aspects of ceph-mgr that require access to the system namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-system
+ namespace: rook-ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+---
+# Aspects of ceph-mgr that operate within the cluster's namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - ceph.rook.io
+ resources:
+ - "*"
+ verbs:
+ - "*"
+---
+# Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-cluster-mgmt
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-cluster-mgmt
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: rook-ceph-system
+---
+# Allow the osd pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-osd
+ namespace: rook-ceph
+---
+# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-mgr
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-system
+ namespace: rook-ceph-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-mgr-system
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-cluster
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-mgr-cluster
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+#################################################################################
+# The Ceph Cluster CRD example
+#################################################################################
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+ name: rook-ceph
+ namespace: rook-ceph
+spec:
+ cephVersion:
+ # For the latest ceph images, see https://hub.docker.com/r/ceph/ceph/tags
+ image: ceph/ceph:v13.2.2-20181023
+ dataDirHostPath: /var/lib/rook
+ dashboard:
+ enabled: true
+ mon:
+ count: 3
+ allowMultiplePerNode: true
+ storage:
+ useAllNodes: true
+ useAllDevices: false
+ config:
+ databaseSizeMB: "1024"
+ journalSizeMB: "1024" \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrole.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrole.yaml
new file mode 100644
index 00000000..58a24d47
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrole.yaml
@@ -0,0 +1,165 @@
+{{- if .Values.rbacEnable }}
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-cluster-mgmt
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ - pods
+ - pods/log
+ - services
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - extensions
+ resources:
+ - deployments
+ - daemonsets
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+---
+# The cluster role for managing the Rook CRDs
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-global
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ # Pod access is needed for fencing
+ - pods
+ # Node access is needed for determining nodes where mons should run
+ - nodes
+ - nodes/proxy
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ # PVs and PVCs are managed by the Rook provisioner
+ - persistentvolumes
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - ceph.rook.io
+ resources:
+ - "*"
+ verbs:
+ - "*"
+- apiGroups:
+ - rook.io
+ resources:
+ - "*"
+ verbs:
+ - "*"
+---
+# Aspects of ceph-mgr that require cluster-wide access
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-cluster
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - nodes
+ - nodes/proxy
+ verbs:
+ - get
+ - list
+ - watch
+{{- if and .Values.agent .Values.agent.mountSecurityMode (ne .Values.agent.mountSecurityMode "Any") }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-agent-mount
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+{{- end }}
+{{- if .Values.pspEnable }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-system-psp-user
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+rules:
+- apiGroups:
+ - extensions
+ resources:
+ - podsecuritypolicies
+ resourceNames:
+ - 00-rook-ceph-operator
+ verbs:
+ - use
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrolebinding.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..845eb6d7
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrolebinding.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.rbacEnable }}
+# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-global
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-global
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+{{- if .Values.pspEnable }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: rook-ceph-system-psp-users
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-system-psp-user
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/dashboard-external-http.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/dashboard-external-http.yaml
new file mode 100644
index 00000000..ee521152
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/dashboard-external-http.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: rook-ceph-mgr-dashboard-external-http
+ namespace: rook-ceph
+ labels:
+ app: rook-ceph-mgr
+ rook_cluster: rook-ceph
+ annotations:
+ "helm.sh/hook": "post-install"
+ "helm.sh/hook-weight": "10"
+spec:
+ ports:
+ - name: dashboard
+ port: 7000
+ protocol: TCP
+ targetPort: 7000
+ selector:
+ app: rook-ceph-mgr
+ rook_cluster: rook-ceph
+ sessionAffinity: None
+ type: NodePort
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/deployment.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/deployment.yaml
new file mode 100644
index 00000000..13c6a763
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/deployment.yaml
@@ -0,0 +1,108 @@
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+ name: rook-ceph-operator
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: rook-ceph-operator
+ template:
+ metadata:
+ labels:
+ app: rook-ceph-operator
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+{{- if .Values.annotations }}
+ annotations:
+{{ toYaml .Values.annotations | indent 8 }}
+{{- end }}
+ spec:
+ containers:
+ - name: rook-ceph-operator
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ args: ["ceph", "operator"]
+ env:
+{{- if not .Values.rbacEnable }}
+ - name: RBAC_ENABLED
+ value: "false"
+{{- end }}
+{{- if .Values.agent }}
+{{- if .Values.agent.toleration }}
+ - name: AGENT_TOLERATION
+ value: {{ .Values.agent.toleration }}
+{{- end }}
+{{- if .Values.agent.tolerationKey }}
+ - name: AGENT_TOLERATION_KEY
+ value: {{ .Values.agent.tolerationKey }}
+{{- end }}
+{{- if .Values.agent.mountSecurityMode }}
+ - name: AGENT_MOUNT_SECURITY_MODE
+ value: {{ .Values.agent.mountSecurityMode }}
+{{- end }}
+{{- if .Values.agent.flexVolumeDirPath }}
+ - name: FLEXVOLUME_DIR_PATH
+ value: {{ .Values.agent.flexVolumeDirPath }}
+{{- end }}
+{{- if .Values.agent.libModulesDirPath }}
+ - name: LIB_MODULES_DIR_PATH
+ value: {{ .Values.agent.libModulesDirPath }}
+{{- end }}
+{{- if .Values.agent.mounts }}
+ - name: AGENT_MOUNTS
+ value: {{ .Values.agent.mounts }}
+{{- end }}
+{{- end }}
+{{- if .Values.discover }}
+{{- if .Values.discover.toleration }}
+ - name: DISCOVER_TOLERATION
+ value: {{ .Values.discover.toleration }}
+{{- end }}
+{{- if .Values.discover.tolerationKey }}
+ - name: DISCOVER_TOLERATION_KEY
+ value: {{ .Values.discover.tolerationKey }}
+{{- end }}
+{{- end }}
+ - name: ROOK_LOG_LEVEL
+ value: {{ .Values.logLevel }}
+ - name: ROOK_ENABLE_SELINUX_RELABELING
+ value: {{ .Values.enableSelinuxRelabeling | quote }}
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+{{- if .Values.mon }}
+{{- if .Values.mon.healthCheckInterval }}
+ - name: ROOK_MON_HEALTHCHECK_INTERVAL
+ value: {{ .Values.mon.healthCheckInterval }}
+{{- end }}
+{{- if .Values.mon.monOutTimeout }}
+ - name: ROOK_MON_OUT_TIMEOUT
+ value: {{ .Values.mon.monOutTimeout }}
+{{- end }}
+{{- end }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+{{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+{{- end }}
+{{- if .Values.rbacEnable }}
+ serviceAccountName: rook-ceph-system
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/psp.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/psp.yaml
new file mode 100644
index 00000000..412b2437
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/psp.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.pspEnable }}
+# PSP for rook-ceph-operator
+
+# Most teams follow the Kubernetes docs and have these PSPs:
+# * privileged (for kube-system namespace)
+# * restricted (for all logged in users)
+#
+# If this PSP were named `rook-ceph-operator`, it would sort after the `restricted` PSP alphabetically,
+# and the `restricted` capabilities would be applied to `rook-system`. That is why it is named `00-rook-ceph-operator`:
+# it stays near the top of the ordering, so `rook-system` gets the intended PSP.
+#
+# More info on PSP ordering : https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-order
+
+apiVersion: extensions/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: 00-rook-ceph-operator
+spec:
+ fsGroup:
+ rule: RunAsAny
+ privileged: true
+ runAsUser:
+ rule: RunAsAny
+ seLinux:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ volumes:
+ - '*'
+ allowedCapabilities:
+ - '*'
+ hostPID: true
+ hostIPC: true
+ hostNetwork: true
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/resources.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/resources.yaml
new file mode 100644
index 00000000..e296663f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/resources.yaml
@@ -0,0 +1,177 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephclusters.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephCluster
+ listKind: CephClusterList
+ plural: cephclusters
+ singular: cephcluster
+ scope: Namespaced
+ version: v1
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ cephVersion:
+ properties:
+ allowUnsupported:
+ type: boolean
+ image:
+ type: string
+ name:
+ pattern: ^(luminous|mimic|nautilus)$
+ type: string
+ dashboard:
+ properties:
+ enabled:
+ type: boolean
+ urlPrefix:
+ type: string
+ port:
+ type: integer
+ minimum: 0
+ maximum: 65535
+ dataDirHostPath:
+ pattern: ^/(\S+)
+ type: string
+ mon:
+ properties:
+ allowMultiplePerNode:
+ type: boolean
+ count:
+ maximum: 9
+ minimum: 1
+ type: integer
+ required:
+ - count
+ network:
+ properties:
+ hostNetwork:
+ type: boolean
+ storage:
+ properties:
+ nodes:
+ items: {}
+ type: array
+ useAllDevices: {}
+ useAllNodes:
+ type: boolean
+ required:
+ - mon
+ additionalPrinterColumns:
+ - name: DataDirHostPath
+ type: string
+ description: Directory used on the K8s nodes
+ JSONPath: .spec.dataDirHostPath
+ - name: MonCount
+ type: string
+ description: Number of MONs
+ JSONPath: .spec.mon.count
+ - name: Age
+ type: date
+ JSONPath: .metadata.creationTimestamp
+ - name: State
+ type: string
+ description: Current State
+ JSONPath: .status.state
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephfilesystems.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephFilesystem
+ listKind: CephFilesystemList
+ plural: cephfilesystems
+ singular: cephfilesystem
+ scope: Namespaced
+ version: v1
+ additionalPrinterColumns:
+ - name: MdsCount
+ type: string
+ description: Number of MDSs
+ JSONPath: .spec.metadataServer.activeCount
+ - name: Age
+ type: date
+ JSONPath: .metadata.creationTimestamp
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephobjectstores.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephObjectStore
+ listKind: CephObjectStoreList
+ plural: cephobjectstores
+ singular: cephobjectstore
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephobjectstoreusers.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephObjectStoreUser
+ listKind: CephObjectStoreUserList
+ plural: cephobjectstoreusers
+ singular: cephobjectstoreuser
+ shortNames:
+ - rcou
+ - objectuser
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephblockpools.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephBlockPool
+ listKind: CephBlockPoolList
+ plural: cephblockpools
+ singular: cephblockpool
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: volumes.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: rook.io
+ names:
+ kind: Volume
+ listKind: VolumeList
+ plural: volumes
+ singular: volume
+ shortNames:
+ - rv
+ scope: Namespaced
+ version: v1alpha2
+---
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/role.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/role.yaml
new file mode 100644
index 00000000..45122d32
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/role.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.rbacEnable }}
+# The role for the operator to manage resources in the system namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+ name: rook-ceph-system
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/rolebinding.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/rolebinding.yaml
new file mode 100644
index 00000000..3ef5897f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/rolebinding.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.rbacEnable }}
+# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+ labels:
+ operator: rook
+ storage-backend: ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-system
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/serviceaccount.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/serviceaccount.yaml
new file mode 100644
index 00000000..7b42de17
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/serviceaccount.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-system
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/storageclass.yml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/storageclass.yml
new file mode 100644
index 00000000..38ddf5d7
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/storageclass.yml
@@ -0,0 +1,28 @@
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+ name: replicapool
+ namespace: rook-ceph
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+ "helm.sh/hook": post-install
+spec:
+ failureDomain: host
+ replicated:
+ size: 1
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: rook-ceph-block
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+ "helm.sh/hook": post-install
+provisioner: ceph.rook.io/block
+parameters:
+ blockPool: replicapool
+ # The value of "clusterNamespace" MUST be the same as the namespace in which your Rook cluster exists
+ clusterNamespace: rook-ceph
+ # Specify the filesystem type of the volume. If not specified, it will use `ext4`.
+ fstype: xfs
+# Optional, default reclaimPolicy is "Delete". Other options are: "Retain", "Recycle" as documented in https://kubernetes.io/docs/concepts/storage/storage-classes/ \ No newline at end of file
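+# For illustration only, a PersistentVolumeClaim that consumes this StorageClass could look
+# like the following (the claim name and size are hypothetical):
+#   apiVersion: v1
+#   kind: PersistentVolumeClaim
+#   metadata:
+#     name: example-pvc
+#   spec:
+#     storageClassName: rook-ceph-block
+#     accessModes: ["ReadWriteOnce"]
+#     resources:
+#       requests:
+#         storage: 1Gi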
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/tool-box.yml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/tool-box.yml
new file mode 100644
index 00000000..98bc3c98
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/tool-box.yml
@@ -0,0 +1,62 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: rook-ceph-tools
+ namespace: rook-ceph
+ labels:
+ app: rook-ceph-tools
+ annotations:
+ "helm.sh/hook": "post-install"
+ "helm.sh/hook-weight": "10"
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: rook-ceph-tools
+ template:
+ metadata:
+ labels:
+ app: rook-ceph-tools
+ spec:
+ dnsPolicy: ClusterFirstWithHostNet
+ containers:
+ - name: rook-ceph-tools
+ image: rook/ceph:v0.9.1
+ command: ["/tini"]
+ args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: ROOK_ADMIN_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: rook-ceph-mon
+ key: admin-secret
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /dev
+ name: dev
+ - mountPath: /sys/bus
+ name: sysbus
+ - mountPath: /lib/modules
+ name: libmodules
+ - name: mon-endpoint-volume
+ mountPath: /etc/rook
+ # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
+ hostNetwork: true
+ volumes:
+ - name: dev
+ hostPath:
+ path: /dev
+ - name: sysbus
+ hostPath:
+ path: /sys/bus
+ - name: libmodules
+ hostPath:
+ path: /lib/modules
+ - name: mon-endpoint-volume
+ configMap:
+ name: rook-ceph-mon-endpoints
+ items:
+ - key: data
+ path: mon-endpoints
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/values.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/values.yaml
new file mode 100644
index 00000000..7b4d07bd
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/values.yaml
@@ -0,0 +1,75 @@
+# Default values for rook-ceph-operator
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+image:
+ prefix: rook
+ repository: rook/ceph
+ tag: v0.9.1
+ pullPolicy: IfNotPresent
+
+hyperkube:
+ repository: k8s.gcr.io/hyperkube
+ tag: v1.7.12
+ pullPolicy: IfNotPresent
+
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+nodeSelector:
+# Constrain the rook-ceph-operator Deployment to nodes with the label `disktype: ssd`.
+# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+# disktype: ssd
+
+# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
+tolerations: []
+
+mon:
+ healthCheckInterval: "45s"
+ monOutTimeout: "300s"
+
+## Annotations to be added to pod
+annotations: {}
+
+## LogLevel can be set to: TRACE, DEBUG, INFO, NOTICE, WARNING, ERROR or CRITICAL
+logLevel: INFO
+
+## If true, create & use RBAC resources
+##
+rbacEnable: false
+
+## If true, create & use PSP resources
+##
+pspEnable: true
+
+## Rook Agent configuration
+## toleration: NoSchedule, PreferNoSchedule or NoExecute
+## tolerationKey: Set this to the specific key of the taint to tolerate
+## flexVolumeDirPath: The path where the Rook agent discovers the flex volume plugins
+## libModulesDirPath: The path where the Rook agent can find kernel modules
+# agent:
+# toleration: NoSchedule
+# tolerationKey: key
+# mountSecurityMode: Any
+## For information on FlexVolume path, please refer to https://rook.io/docs/rook/master/flexvolume.html
+# flexVolumeDirPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
+# libModulesDirPath: /lib/modules
+# mounts: mount1=/host/path:/container/path,/host/path2:/container/path2
+agent:
+ flexVolumeDirPath: /var/lib/kubelet/volumeplugins
+## Rook Discover configuration
+## toleration: NoSchedule, PreferNoSchedule or NoExecute
+## tolerationKey: Set this to the specific key of the taint to tolerate
+# discover:
+# toleration: NoSchedule
+# tolerationKey: key
+
+# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
+# Disable it here if you have similar issues.
+# For more details see https://github.com/rook/rook/issues/2417
+enableSelinuxRelabeling: true
diff --git a/vnfs/DAaaS/deploy/collection/.helmignore b/vnfs/DAaaS/deploy/collection/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/deploy/collection/Chart.yaml b/vnfs/DAaaS/deploy/collection/Chart.yaml
new file mode 100644
index 00000000..f21cc894
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: Helm charts for collection agents and collection service
+name: collection
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/collection/charts/cadvisor/.helmignore b/vnfs/DAaaS/deploy/collection/charts/cadvisor/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/cadvisor/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/deploy/collection/charts/cadvisor/Chart.yaml b/vnfs/DAaaS/deploy/collection/charts/cadvisor/Chart.yaml
new file mode 100644
index 00000000..aea55a29
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/cadvisor/Chart.yaml
@@ -0,0 +1,19 @@
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+appVersion: "1.0"
+description: Cadvisor Helm Chart
+name: cadvisor
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/NOTES.txt b/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/NOTES.txt
new file mode 100644
index 00000000..3eea9994
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/NOTES.txt
@@ -0,0 +1,34 @@
+# Copyright (c) 2019 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.cadvisor_prometheus.service.type }}
+ NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "name" . }})
+ NODE_IPS=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[*].status.addresses[0].address}")
+ Visit http://<NODE_IP>:$NODE_PORT (use any address from $NODE_IPS)
+{{- else if contains "LoadBalancer" .Values.cadvisor_prometheus.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch its status by running 'kubectl get svc -w {{ include "name" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.cadvisor_prometheus.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace={{ .Release.Namespace }} -l "app={{ template "name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:{{ .Values.cadvisor_prometheus.service.targetPort }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/_helpers.tpl b/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/_helpers.tpl
new file mode 100644
index 00000000..ea2a3266
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/_helpers.tpl
@@ -0,0 +1,25 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/* Workaround for https://github.com/helm/helm/issues/3117 */}}
+{{- define "rangeskipempty" -}}
+{{- range $key, $value := . }}
+{{- if $value }}
+{{ $key }}: {{ $value }}
+{{- end }}
+{{- end }}
+{{- end }}
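+
+{{/*
+Illustrative note (hypothetical input): calling "rangeskipempty" with the map
+{app: cadvisor, collector: ""} renders only "app: cadvisor"; the empty "collector"
+key is skipped, which keeps generated label selectors free of blank values.
+*/}}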
diff --git a/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/daemonset.yaml b/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/daemonset.yaml
new file mode 100644
index 00000000..e287c7d8
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/daemonset.yaml
@@ -0,0 +1,79 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+apiVersion: apps/v1beta2
+kind: DaemonSet
+metadata:
+ name: {{ template "fullname" . }}
+ labels:
+ app: {{ template "name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ name: {{ template "fullname" . }}
+ template:
+ metadata:
+ labels:
+ name: {{ template "fullname" . }}
+ app: {{ template "name" . }}
+ collector: cadvisor
+ release: {{ .Release.Name }}
+ spec:
+ hostNetwork: true
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: rootfs
+ mountPath: /rootfs
+ readOnly: true
+ - name: var-run
+ mountPath: /var/run
+ readOnly: false
+ - name: sys
+ mountPath: /sys
+ readOnly: true
+ - name: docker
+ mountPath: /var/lib/docker
+ readOnly: true
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ args:
+ - --housekeeping_interval=10s
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: rootfs
+ hostPath:
+ path: /
+ - name: var-run
+ hostPath:
+ path: /var/run
+ - name: sys
+ hostPath:
+ path: /sys
+ - name: docker
+ hostPath:
+ path: /var/lib/docker
diff --git a/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/service.yaml b/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/service.yaml
new file mode 100644
index 00000000..a8a932d1
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/cadvisor/templates/service.yaml
@@ -0,0 +1,37 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: cadvisor
+ labels:
+ app: cadvisor
+ release: {{ .Release.Name }}
+spec:
+ type: NodePort
+ externalTrafficPolicy: Local
+ selector:
+{{ include "rangeskipempty" .Values.cadvisor_prometheus.service.selector | indent 4 }}
+ ports:
+ - name: cadvisor-prometheus
+ {{- if eq .Values.cadvisor_prometheus.service.type "NodePort" }}
+ nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.cadvisor_prometheus.service.nodePort }}
+ {{- end }}
+ protocol: TCP
+ port: {{ .Values.cadvisor_prometheus.service.port }}
+ targetPort: {{ .Values.cadvisor_prometheus.service.targetPort }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/cadvisor/values.yaml b/vnfs/DAaaS/deploy/collection/charts/cadvisor/values.yaml
new file mode 100644
index 00000000..a17c3dd5
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/cadvisor/values.yaml
@@ -0,0 +1,23 @@
+# Default values for cadvisor.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+ingress:
+ enabled: false
+image:
+ repository: google/cadvisor
+ tag: latest
+ pullPolicy: IfNotPresent
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, add 'limits' and
+ # 'requests' blocks under 'resources:' and remove the curly braces.
+cadvisor_prometheus:
+ service:
+ type: NodePort
+ port: 80
+ nodePort: 91
+ targetPort: 8080
+ selector:
+ app: cadvisor
+ collector: cadvisor
diff --git a/vnfs/DAaaS/deploy/collection/charts/collectd/.helmignore b/vnfs/DAaaS/deploy/collection/charts/collectd/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/collectd/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/vnfs/DAaaS/deploy/collection/charts/collectd/Chart.yaml b/vnfs/DAaaS/deploy/collection/charts/collectd/Chart.yaml
new file mode 100644
index 00000000..fcdcfde9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/collectd/Chart.yaml
@@ -0,0 +1,19 @@
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+appVersion: "7.1.0"
+description: Collectd Helm Chart
+name: collectd
+version: 0.2.0
diff --git a/vnfs/DAaaS/deploy/collection/charts/collectd/resources/config/collectd.conf b/vnfs/DAaaS/deploy/collection/charts/collectd/resources/config/collectd.conf
new file mode 100644
index 00000000..f62adb6a
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/collectd/resources/config/collectd.conf
@@ -0,0 +1,44 @@
+FQDNLookup false
+LoadPlugin cpu
+LoadPlugin memory
+LoadPlugin cpufreq
+LoadPlugin disk
+LoadPlugin ethstat
+LoadPlugin ipc
+LoadPlugin ipmi
+LoadPlugin load
+LoadPlugin numa
+LoadPlugin processes
+LoadPlugin df
+LoadPlugin turbostat
+LoadPlugin uptime
+LoadPlugin contextswitch
+LoadPlugin irq
+LoadPlugin swap
+LoadPlugin write_prometheus
+
+LoadPlugin logfile
+<Plugin logfile>
+ LogLevel info
+ File "/var/log/collectd.log"
+ Timestamp true
+ PrintSeverity false
+</Plugin>
+<Plugin "cpu">
+ Interval 5
+ ReportByState false
+ ReportByCpu false
+</Plugin>
+
+<Plugin "memory">
+ Interval 30
+ ValuesAbsolute false
+ ValuesPercentage true
+</Plugin>
+
+<Plugin "write_prometheus">
+ Port "{{ .Values.prometheus_port }}"
+</Plugin>
+
+# Last line (collectd requires a trailing '\n' on the last line)
diff --git a/vnfs/DAaaS/deploy/collection/charts/collectd/templates/NOTES.txt b/vnfs/DAaaS/deploy/collection/charts/collectd/templates/NOTES.txt
new file mode 100644
index 00000000..06ca128b
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/collectd/templates/NOTES.txt
@@ -0,0 +1,34 @@
+# Copyright (c) 2019 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+ http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.collectd_prometheus.service.type }}
+ NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "name" . }})
+ NODE_IPS=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[*].status.addresses[0].address}")
+ Visit http://<NODE_IP>:$NODE_PORT (use any address from $NODE_IPS)
+{{- else if contains "LoadBalancer" .Values.collectd_prometheus.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch its status by running 'kubectl get svc -w {{ include "name" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.collectd_prometheus.service.type }}
+ CLUSTER_NODE_IPS=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[*].status.addresses[0].address}")
+ CLUSTER_NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].port}" services {{ include "name" . }})
+ Visit http://<CLUSTER_NODE_IP>:$CLUSTER_NODE_PORT (use any address from $CLUSTER_NODE_IPS)
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/collectd/templates/_helpers.tpl b/vnfs/DAaaS/deploy/collection/charts/collectd/templates/_helpers.tpl
new file mode 100644
index 00000000..b5e98086
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/collectd/templates/_helpers.tpl
@@ -0,0 +1,25 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/* Workaround for https://github.com/helm/helm/issues/3117 */}}
+{{- define "rangeskipempty" -}}
+{{- range $key, $value := . }}
+{{- if $value }}
+{{ $key }}: {{ $value }}
+{{- end }}
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/collection/charts/collectd/templates/configmap.yaml b/vnfs/DAaaS/deploy/collection/charts/collectd/templates/configmap.yaml
new file mode 100644
index 00000000..5f5dde01
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/collectd/templates/configmap.yaml
@@ -0,0 +1,27 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "fullname" . }}-config
+ labels:
+ app: {{ template "name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+data:
+ node-collectd.conf: |-
+ {{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/collectd/templates/daemonset.yaml b/vnfs/DAaaS/deploy/collection/charts/collectd/templates/daemonset.yaml
new file mode 100644
index 00000000..29fdded9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/collectd/templates/daemonset.yaml
@@ -0,0 +1,83 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: {{ template "fullname" . }}
+ annotations:
+ checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ labels:
+ app: {{ template "name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: {{ template "name" . }}
+ collector: collectd
+ release: {{ .Release.Name }}
+ spec:
+ hostNetwork: true
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: {{ template "fullname" . }}-config
+ mountPath: /opt/collectd/etc
+ - name: proc
+ mountPath: /mnt/proc
+ readOnly: true
+ - name: root
+ mountPath: /hostfs
+ readOnly: true
+ - name: etc
+ mountPath: /mnt/etc
+ readOnly: true
+ - name: run
+ mountPath: /var/run/docker.sock
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+ {{- end }}
+ volumes:
+ - name: {{ template "fullname" . }}-config
+ configMap:
+ name: {{ template "fullname" . }}-config
+ items:
+ - key: node-collectd.conf
+ path: collectd.conf
+ - name: proc
+ hostPath:
+ path: /proc
+ - name: root
+ hostPath:
+ path: /
+ - name: etc
+ hostPath:
+ path: /etc
+ - name: run
+ hostPath:
+ path: /var/run/docker.sock
diff --git a/vnfs/DAaaS/deploy/collection/charts/collectd/templates/service.yaml b/vnfs/DAaaS/deploy/collection/charts/collectd/templates/service.yaml
new file mode 100644
index 00000000..7571715d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/collectd/templates/service.yaml
@@ -0,0 +1,32 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: collectd
+ labels:
+ app: collectd
+ release: {{ .Release.Name }}
+spec:
+ ports:
+ - name: collectd-prometheus
+ port: {{ .Values.collectd_prometheus.service.port }}
+ protocol: TCP
+ targetPort: {{ .Values.collectd_prometheus.service.targetPort }}
+ selector:
+{{ include "rangeskipempty" .Values.collectd_prometheus.service.selector | indent 4 }}
+ type: ClusterIP
diff --git a/vnfs/DAaaS/deploy/collection/charts/collectd/values.yaml b/vnfs/DAaaS/deploy/collection/charts/collectd/values.yaml
new file mode 100644
index 00000000..fdfcae71
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/collectd/values.yaml
@@ -0,0 +1,28 @@
+# Default values for collectd.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+ingress:
+ enabled: false
+image:
+ repository: opnfv/barometer-collectd
+ tag: latest
+ pullPolicy: IfNotPresent
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+collectd_prometheus:
+ service:
+ type: ClusterIP
+ port: 9103
+ targetPort: 9103
+ selector:
+ app: collectd
+ collector: collectd
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/.helmignore b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/.helmignore
new file mode 100755
index 00000000..f0c13194
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/Chart.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/Chart.yaml
new file mode 100755
index 00000000..2e45b3a3
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/Chart.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+appVersion: 0.17.0
+description: A Helm chart for prometheus node-exporter
+home: https://github.com/prometheus/node_exporter/
+keywords:
+- node-exporter
+- prometheus
+- exporter
+maintainers:
+- email: gianrubio@gmail.com
+ name: gianrubio
+name: prometheus-node-exporter
+sources:
+- https://github.com/prometheus/node_exporter/
+version: 1.3.2
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/OWNERS b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/OWNERS
new file mode 100755
index 00000000..fe9b2c3d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/OWNERS
@@ -0,0 +1,4 @@
+approvers:
+- gianrubio
+reviewers:
+- gianrubio \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/README.md b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/README.md
new file mode 100755
index 00000000..b9b50101
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/README.md
@@ -0,0 +1,80 @@
+# Prometheus Node Exporter
+
+* Installs prometheus [node exporter](https://github.com/prometheus/node_exporter)
+
+## TL;DR;
+
+```console
+$ helm install stable/prometheus-node-exporter
+```
+
+## Introduction
+
+This chart bootstraps a prometheus [node exporter](http://github.com/prometheus/node_exporter) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm install --name my-release stable/prometheus-node-exporter
+```
+
+The command deploys node exporter on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the Node Exporter chart and their default values.
+
+| Parameter | Description | Default | |
+| --------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | --------------------------------------- | --- |
+| `image.repository` | Image repository | `quay.io/prometheus/node-exporter` | |
+| `image.tag` | Image tag | `v0.17.0` | |
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` | |
+| `extraArgs` | Additional container arguments | `[]` | |
+| `extraHostVolumeMounts` | Additional host volume mounts | {} | |
+| `podLabels` | Additional labels to be added to pods | {} | |
+| `rbac.create` | If true, create & use RBAC resources | `true` | |
+| `rbac.pspEnabled` | Specifies whether a PodSecurityPolicy should be created. | `true` | |
+| `resources` | CPU/Memory resource requests/limits | `{}` | |
+| `service.type` | Service type | `ClusterIP` | |
+| `service.port` | The service port | `9100` | |
+| `service.targetPort` | The target port of the container | `9100` | |
+| `service.nodePort` | The node port of the service | | |
+| `service.annotations` | Kubernetes service annotations | `{prometheus.io/scrape: "true"}` | |
+| `serviceAccount.create` | Specifies whether a service account should be created. | `true` | |
+| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | | |
+| `serviceAccount.imagePullSecrets` | Specify image pull secrets | `[]` | |
+| `securityContext` | SecurityContext | `{"runAsNonRoot": true, "runAsUser": 65534}` | |
+| `affinity` | A group of affinity scheduling rules for pod assignment | `{}` | |
+| `nodeSelector` | Node labels for pod assignment | `{}` | |
+| `tolerations` | List of node taints to tolerate | `- effect: NoSchedule operator: Exists` | |
+| `priorityClassName` | Name of Priority Class to assign pods | `nil` | |
+| `endpoints` | list of addresses that have node exporter deployed outside of the cluster | `[]` | |
+| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | |
+| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | |
+| `prometheus.monitor.namespace` | namespace where servicemonitor resource should be created | `the same namespace as prometheus node exporter` | |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install --name my-release \
+ --set serviceAccount.name=node-exporter \
+ stable/prometheus-node-exporter
+```
+
+Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install --name my-release -f values.yaml stable/prometheus-node-exporter
+```
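+
+For illustration only, such a `values.yaml` override could look like the following (the node port shown is a hypothetical choice; the keys are documented in the table above):
+
+```yaml
+service:
+  type: NodePort
+  port: 9100
+  targetPort: 9100
+  nodePort: 30206
+prometheus:
+  monitor:
+    enabled: true
+```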
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/NOTES.txt b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/NOTES.txt
new file mode 100755
index 00000000..4902798f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/NOTES.txt
@@ -0,0 +1,15 @@
+1. Get the application URL by running these commands:
+{{- if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus-node-exporter.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch its status by running 'kubectl get svc -w {{ template "prometheus-node-exporter.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus-node-exporter.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus-node-exporter.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl port-forward $POD_NAME 8080:80
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/_helpers.tpl b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/_helpers.tpl
new file mode 100755
index 00000000..4f0e2dfe
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/_helpers.tpl
@@ -0,0 +1,55 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "prometheus-node-exporter.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "prometheus-node-exporter.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Generate basic labels */}}
+{{- define "prometheus-node-exporter.labels" }}
+app: {{ template "prometheus-node-exporter.name" . }}
+heritage: {{.Release.Service }}
+release: {{.Release.Name }}
+chart: {{ template "prometheus-node-exporter.chart" . }}
+{{- if .Values.podLabels}}
+{{ toYaml .Values.podLabels }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "prometheus-node-exporter.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "prometheus-node-exporter.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "prometheus-node-exporter.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/daemonset.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/daemonset.yaml
new file mode 100755
index 00000000..dfb15f1d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/daemonset.yaml
@@ -0,0 +1,98 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: {{ template "prometheus-node-exporter.fullname" . }}
+ labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "prometheus-node-exporter.name" . }}
+ release: {{ .Release.Name }}
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels: {{ include "prometheus-node-exporter.labels" . | indent 8 }}
+ spec:
+{{- if and .Values.rbac.create .Values.serviceAccount.create }}
+ serviceAccountName: {{ template "prometheus-node-exporter.serviceAccountName" . }}
+{{- end }}
+{{- if .Values.securityContext }}
+ securityContext:
+{{ toYaml .Values.securityContext | indent 8 }}
+{{- end }}
+{{- if .Values.priorityClassName }}
+ priorityClassName: {{ .Values.priorityClassName }}
+{{- end }}
+ containers:
+ - name: node-exporter
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ args:
+ - --path.procfs=/host/proc
+ - --path.sysfs=/host/sys
+ - --web.listen-address=0.0.0.0:{{ .Values.service.port }}
+{{- if .Values.extraArgs }}
+{{ toYaml .Values.extraArgs | indent 12 }}
+{{- end }}
+ ports:
+ - name: metrics
+ containerPort: {{ .Values.service.targetPort }}
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /
+ port: {{ .Values.service.port }}
+ readinessProbe:
+ httpGet:
+ path: /
+ port: {{ .Values.service.port }}
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ volumeMounts:
+ - name: proc
+ mountPath: /host/proc
+ readOnly: true
+ - name: sys
+ mountPath: /host/sys
+ readOnly: true
+ {{- if .Values.extraHostVolumeMounts }}
+ {{- range $_, $mount := .Values.extraHostVolumeMounts }}
+ - name: {{ $mount.name }}
+ mountPath: {{ $mount.mountPath }}
+ readOnly: {{ $mount.readOnly }}
+ {{- if $mount.mountPropagation }}
+ mountPropagation: {{ $mount.mountPropagation }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ hostNetwork: true
+ hostPID: true
+{{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+{{- end }}
+{{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+{{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ volumes:
+ - name: proc
+ hostPath:
+ path: /proc
+ - name: sys
+ hostPath:
+ path: /sys
+ {{- if .Values.extraHostVolumeMounts }}
+ {{- range $_, $mount := .Values.extraHostVolumeMounts }}
+ - name: {{ $mount.name }}
+ hostPath:
+ path: {{ $mount.hostPath }}
+ {{- end }}
+ {{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/endpoints.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/endpoints.yaml
new file mode 100755
index 00000000..4c5c75fa
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/endpoints.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.endpoints }}
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ template "prometheus-node-exporter.fullname" . }}
+ labels:
+{{ include "prometheus-node-exporter.labels" . | indent 4 }}
+subsets:
+ - addresses:
+ {{- range .Values.endpoints }}
+ - ip: {{ . }}
+ {{- end }}
+ ports:
+ - name: metrics
+ port: 9100
+ protocol: TCP
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/monitor.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/monitor.yaml
new file mode 100755
index 00000000..9c723e69
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/monitor.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.prometheus.monitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ template "prometheus-node-exporter.fullname" . }}
+ labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }}
+ {{- if .Values.prometheus.monitor.additionalLabels }}
+{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }}
+ {{- end }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "prometheus-node-exporter.name" . }}
+ release: {{ .Release.Name }}
+ endpoints:
+ - port: metrics
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml
new file mode 100755
index 00000000..3d0a636a
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.rbac.create }}
+{{- if .Values.rbac.pspEnabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }}
+ name: psp-{{ template "prometheus-node-exporter.fullname" . }}
+rules:
+- apiGroups: ['extensions']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames:
+ - {{ template "prometheus-node-exporter.fullname" . }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml
new file mode 100755
index 00000000..50f7a149
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.rbac.create }}
+{{- if .Values.rbac.pspEnabled }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }}
+ name: psp-{{ template "prometheus-node-exporter.fullname" . }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp-{{ template "prometheus-node-exporter.fullname" . }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "prometheus-node-exporter.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp.yaml
new file mode 100755
index 00000000..1fa6f289
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/psp.yaml
@@ -0,0 +1,51 @@
+{{- if .Values.rbac.create }}
+{{- if .Values.rbac.pspEnabled }}
+apiVersion: extensions/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }}
+ name: {{ template "prometheus-node-exporter.fullname" . }}
+spec:
+ privileged: false
+ # Required to prevent escalations to root.
+ # allowPrivilegeEscalation: false
+ # This is redundant with non-root + disallow privilege escalation,
+ # but we can provide it for defense in depth.
+ #requiredDropCapabilities:
+ # - ALL
+ # Allow core volume types.
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ - 'persistentVolumeClaim'
+ - 'hostPath'
+ hostNetwork: true
+ hostIPC: false
+ hostPID: true
+ hostPorts:
+ - min: 0
+ max: 65535
+ runAsUser:
+ # Permits the container to run with root privileges as well.
+ rule: 'RunAsAny'
+ seLinux:
+ # This policy assumes the nodes are using AppArmor rather than SELinux.
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 0
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 0
+ max: 65535
+ readOnlyRootFilesystem: false
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/service.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/service.yaml
new file mode 100755
index 00000000..cffe547b
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/service.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "prometheus-node-exporter.fullname" . }}
+{{- if .Values.service.annotations }}
+ annotations:
+{{ toYaml .Values.service.annotations | indent 4 }}
+{{- end }}
+ labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }}
+ nodePort: {{ .Values.service.nodePort }}
+ {{- end }}
+ targetPort: {{ .Values.service.targetPort }}
+ protocol: TCP
+ name: metrics
+ selector:
+ app: {{ template "prometheus-node-exporter.name" . }}
+ release: {{ .Release.Name }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/serviceaccount.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/serviceaccount.yaml
new file mode 100755
index 00000000..b70745aa
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/templates/serviceaccount.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.rbac.create -}}
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "prometheus-node-exporter.serviceAccountName" . }}
+ labels:
+ app: {{ template "prometheus-node-exporter.name" . }}
+ chart: {{ template "prometheus-node-exporter.chart" . }}
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+imagePullSecrets:
+{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }}
+{{- end -}}
+{{- end -}} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/values.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/values.yaml
new file mode 100755
index 00000000..14bcfc53
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus-node-exporter/values.yaml
@@ -0,0 +1,96 @@
+# Default values for prometheus-node-exporter.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+image:
+ repository: quay.io/prometheus/node-exporter
+ tag: v0.17.0
+ pullPolicy: IfNotPresent
+
+service:
+ type: ClusterIP
+ port: 9100
+ targetPort: 9100
+ nodePort:
+ annotations:
+ prometheus.io/scrape: "true"
+
+prometheus:
+ monitor:
+ enabled: false
+ additionalLabels: {}
+ namespace: ""
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 200m
+ # memory: 50Mi
+ # requests:
+ # cpu: 100m
+ # memory: 30Mi
+
+serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+ imagePullSecrets: []
+
+securityContext:
+ runAsNonRoot: true
+ runAsUser: 65534
+
+rbac:
+ ## If true, create & use RBAC resources
+ ##
+ create: true
+ ## If true, create & use Pod Security Policy resources
+ ## https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+ pspEnabled: true
+
+# for deployments that have node_exporter deployed outside of the cluster, list
+# their addresses here
+endpoints: []
+
+## Assign a group of affinity scheduling rules
+##
+affinity: {}
+# nodeAffinity:
+# requiredDuringSchedulingIgnoredDuringExecution:
+# nodeSelectorTerms:
+# - matchFields:
+# - key: metadata.name
+# operator: In
+# values:
+# - target-host-name
+
+## Assign a nodeSelector if operating a hybrid cluster
+##
+nodeSelector: {}
+# beta.kubernetes.io/arch: amd64
+# beta.kubernetes.io/os: linux
+
+tolerations:
+ - effect: NoSchedule
+ operator: Exists
+
+## Assign a PriorityClassName to pods if set
+# priorityClassName: ""
+
+## Additional container arguments
+##
+extraArgs: {}
+# - --collector.diskstats.ignored-devices=^(ram|loop|fd|(h|s|v)d[a-z]|nvme\\d+n\\d+p)\\d+$
+
+## Additional mounts from the host
+##
+extraHostVolumeMounts: {}
+# - name: <mountName>
+# hostPath: <hostPath>
+# mountPath: <mountPath>
+# readOnly: true|false
+# mountPropagation: None|HostToContainer|Bidirectional
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus/.helmignore b/vnfs/DAaaS/deploy/collection/charts/prometheus/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus/Chart.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus/Chart.yaml
new file mode 100644
index 00000000..6e7ddfbc
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: Prometheus instance with remote storage integrations.
+name: prometheus
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/NOTES.txt b/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/NOTES.txt
new file mode 100644
index 00000000..f8882883
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/NOTES.txt
@@ -0,0 +1,15 @@
+1. Get the application URL by running these commands:
+{{ if contains "NodePort" .Values.prometheus.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prometheus.fullname" . }}-prometheus)
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.prometheus.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+  You can watch the status by running 'kubectl get svc -w {{ include "prometheus.fullname" . }}-prometheus'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prometheus.fullname" . }}-prometheus -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:9090
+{{- else if contains "ClusterIP" .Values.prometheus.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app=prometheus,prometheus={{ include "prometheus.fullname" . }}-prometheus" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:9090 to use your application"
+  kubectl port-forward $POD_NAME 9090:9090
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/_helpers.tpl b/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/_helpers.tpl
new file mode 100644
index 00000000..1ac77dd8
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/_helpers.tpl
@@ -0,0 +1,47 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "prometheus.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "prometheus.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "prometheus.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/* Create chart name and version as used by the chart label. */}}
+{{- define "prometheus.chartref" -}}
+{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}}
+{{- end }}
+
+{{/* Generate basic labels */}}
+{{- define "prometheus.labels" }}
+chart: {{ template "prometheus.chartref" . }}
+release: {{ .Release.Name | quote }}
+heritage: {{ .Release.Service | quote }}
+{{- if .Values.commonLabels}}
+{{ toYaml .Values.commonLabels }}
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/prometheus.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/prometheus.yaml
new file mode 100644
index 00000000..9c3d84c2
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/prometheus.yaml
@@ -0,0 +1,47 @@
+apiVersion: monitoring.coreos.com/v1
+kind: Prometheus
+metadata:
+ name: {{ template "prometheus.fullname" . }}-prometheus
+  labels:
+    app: {{ template "prometheus.name" . }}-prometheus
+  annotations:
+    "helm.sh/hook": post-install
+    "helm.sh/hook-weight": "2"
+spec:
+ serviceMonitorSelector:
+ matchLabels:
+ app: {{ template "prometheus.name" . }}-prometheus
+ release: {{ .Release.Name }}
+ serviceMonitorNamespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace | quote }}
+
+ # TODO: Templatizing multiple remote read/write.
+ # Especially Kafka adapater.
+ remoteRead:
+ - url: "{{ .Values.m3db.url }}/api/v1/prom/remote/read"
+ # To test reading even when local Prometheus has the data
+ readRecent: true
+ remoteWrite:
+ - url: "{{ .Values.m3db.url }}/api/v1/prom/remote/write"
+ # To differentiate between local and remote storage we will add a storage label
+ writeRelabelConfigs:
+ - targetLabel: metrics_storage
+ replacement: m3db_remote
+ - url: "{{ .Values.kafkaAdapter.url }}/receive"
+ containers:
+ - name: {{ template "prometheus.name" . }}-adapter
+ image: "{{ .Values.kafkaAdapter.image.repository }}:{{ .Values.kafkaAdapter.image.tag }}"
+ imagePullPolicy: {{ .Values.kafkaAdapter.image.pullPolicy }}
+ env:
+ - name: KAFKA_BROKER_LIST
+ value: {{ .Values.kafkaAdapter.broker }}
+ - name: KAFKA_TOPIC
+ value: {{ .Values.kafkaAdapter.topic }}
+ - name: SERIALIZATION_FORMAT
+ value: {{ .Values.kafkaAdapter.serializationFormat }}
+ - name: PORT
+ value: {{ quote .Values.kafkaAdapter.port }}
+ - name: LOG_LEVEL
+ value: {{ .Values.kafkaAdapter.logLevel }}
+ resources:
+{{ toYaml .Values.kafkaAdapter.resources | indent 6 }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/service.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/service.yaml
new file mode 100644
index 00000000..0114ed2e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/service.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "prometheus.fullname" . }}-prometheus
+ labels:
+ app: {{ template "prometheus.name" . }}-prometheus
+{{- if .Values.prometheus.service.annotations }}
+ annotations:
+{{ toYaml .Values.prometheus.service.annotations | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.prometheus.service.clusterIP }}
+ clusterIP: {{ .Values.prometheus.service.clusterIP }}
+{{- end }}
+{{- if .Values.prometheus.service.externalIPs }}
+ externalIPs:
+{{ toYaml .Values.prometheus.service.externalIPs | indent 4 }}
+{{- end }}
+{{- if .Values.prometheus.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.prometheus.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.prometheus.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges:
+ {{- range $cidr := .Values.prometheus.service.loadBalancerSourceRanges }}
+ - {{ $cidr }}
+ {{- end }}
+{{- end }}
+ ports:
+ - name: web
+ {{- if eq .Values.prometheus.service.type "NodePort" }}
+ nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.prometheus.service.nodePort }}
+ {{- end }}
+ port: 9090
+ targetPort: web
+ selector:
+ app: prometheus
+ prometheus: {{ template "prometheus.fullname" . }}-prometheus
+ type: "{{ .Values.prometheus.service.type }}"
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/servicemonitor.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/servicemonitor.yaml
new file mode 100644
index 00000000..ea2b81b6
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus/templates/servicemonitor.yaml
@@ -0,0 +1,30 @@
+{{- if .Values.prometheus.additionalServiceMonitors }}
+apiVersion: v1
+kind: List
+items:
+{{- range .Values.prometheus.additionalServiceMonitors }}
+ - apiVersion: "monitoring.coreos.com/v1"
+ kind: ServiceMonitor
+ metadata:
+      name: {{ .name }}
+      annotations:
+        "helm.sh/hook": post-install
+        "helm.sh/hook-weight": "1"
+ labels:
+ app: {{ template "prometheus.name" $ }}-prometheus
+{{ include "prometheus.labels" $ | indent 8 }}
+ {{- if .additionalLabels }}
+{{ toYaml .additionalLabels | indent 8 }}
+ {{- end }}
+ spec:
+ endpoints:
+{{ toYaml .endpoints | indent 8 }}
+ {{- if .jobLabel }}
+ jobLabel: {{ .jobLabel }}
+ {{- end }}
+ namespaceSelector:
+ matchNames:
+ - {{ $.Release.Namespace | quote }}
+ selector:
+{{ toYaml .selector | indent 8 }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/collection/charts/prometheus/values.yaml b/vnfs/DAaaS/deploy/collection/charts/prometheus/values.yaml
new file mode 100644
index 00000000..4398f52f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/charts/prometheus/values.yaml
@@ -0,0 +1,79 @@
+## Deploy a Prometheus instance
+##
+prometheus:
+ additionalServiceMonitors:
+ - name: service-monitor-collectd
+ additionalLabels:
+ collector: collectd
+ jobLabel: collectd
+ selector:
+ matchLabels:
+ app: collectd
+ endpoints:
+ - port: collectd-prometheus
+ interval: 10s
+ path: /metrics
+ - name: service-monitor-node-exporter
+ additionalLabels:
+ collector: prometheus-node-exporter
+ jobLabel: node-exporter
+ selector:
+ matchLabels:
+ app: prometheus-node-exporter
+ endpoints:
+ - port: metrics
+ interval: 30s
+ - name: service-monitor-cadvisor
+ additionalLabels:
+ collector: cadvisor
+ jobLabel: cadvisor
+ selector:
+ matchLabels:
+ app: cadvisor
+ endpoints:
+ - port: cadvisor-prometheus
+ interval: 10s
+ path: /metrics
+
+ service:
+ annotations: {}
+ labels: {}
+ clusterIP: ""
+
+ ## To be used with a proxy extraContainer port
+ targetPort: 9090
+
+ ## List of IP addresses at which the Prometheus server service is available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+
+ ## Port to expose on each node
+ ## Only used if service.type is 'NodePort'
+ ##
+ nodePort: 90
+
+  ## LoadBalancer IP
+  ## Only used if service.type is "LoadBalancer"
+ loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+ ## Service type
+ ##
+ type: NodePort
+
+ sessionAffinity: ""
+m3db:
+ url: http://m3coordinator-m3db.m3db-operator.svc.cluster.local:7201
+
+kafkaAdapter:
+ image:
+ repository: telefonica/prometheus-kafka-adapter
+ tag: 1.2.0
+ pullPolicy: IfNotPresent
+ url: http://localhost:8080
+ port: 8080
+ broker: my-cluster-kafka-bootstrap.msg.svc.cluster.local:9092
+ topic: orders
+ serializationFormat: json
+ logLevel: debug
+ resources: {}
diff --git a/vnfs/DAaaS/deploy/collection/values.yaml b/vnfs/DAaaS/deploy/collection/values.yaml
new file mode 100644
index 00000000..7fd13869
--- /dev/null
+++ b/vnfs/DAaaS/deploy/collection/values.yaml
@@ -0,0 +1,28 @@
+# Copyright © 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ nodePortPrefix: 300
+ repository: nexus3.onap.org:10001
+ readinessRepository: oomk8s
+ readinessImage: readiness-check:2.0.0
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+
+#################################################################
+# Collection Package Day-0 configuration defaults.
+#################################################################
diff --git a/vnfs/DAaaS/deploy/day2_configs/collectd/README.txt b/vnfs/DAaaS/deploy/day2_configs/collectd/README.txt
new file mode 100644
index 00000000..e1d6e7da
--- /dev/null
+++ b/vnfs/DAaaS/deploy/day2_configs/collectd/README.txt
@@ -0,0 +1,14 @@
+This assumes the initial/day0 config was deployed in namespace "edge1" with helm release name "cp" (helm install -n cp collection/ --namespace=edge1)
+
+*******************Day2 config (add more plugins)
+This day2 config adds the cpu, memory and cpufreq plugins to the existing config.
+
+Run the commands below to apply the day2 config:
+
+1. kubectl patch --namespace=edge1 configmaps cp-collectd-config -p "$(cat add_plugins.yaml)"
+
+2. Restart the pods: kubectl delete pods --namespace=edge1 cp-collectd-db7mf cp-collectd-pfc9t cp-collectd-sqjvq
+
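+The pod names in step 2 are examples and differ in every deployment. As a sketch, assuming the collectd pods carry the app=collectd label (the label the prometheus chart's service monitor selects on), the pods can also be restarted without looking their names up:
+
+kubectl delete pods --namespace=edge1 -l app=collectd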
+
+************Day3 config (replace image being used)
+1. kubectl patch --namespace=edge1 daemonset cp-collectd -p "$(cat replace_image.yaml)"
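+
+After the image patch, the rolling restart can be watched with a standard rollout check (assuming the daemonset uses the RollingUpdate update strategy):
+
+kubectl rollout status --namespace=edge1 daemonset/cp-collectd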
diff --git a/vnfs/DAaaS/deploy/day2_configs/collectd/add_plugins.yaml b/vnfs/DAaaS/deploy/day2_configs/collectd/add_plugins.yaml
new file mode 100644
index 00000000..9e80ce8d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/day2_configs/collectd/add_plugins.yaml
@@ -0,0 +1,47 @@
+data:
+ node-collectd.conf: |-
+ FQDNLookup false
+
+ LoadPlugin cpu
+ LoadPlugin memory
+ LoadPlugin cpufreq
+ LoadPlugin disk
+ LoadPlugin ethstat
+ LoadPlugin ipc
+ LoadPlugin ipmi
+ LoadPlugin load
+ LoadPlugin numa
+ LoadPlugin processes
+ LoadPlugin df
+ LoadPlugin turbostat
+ LoadPlugin uptime
+ LoadPlugin contextswitch
+ LoadPlugin irq
+ LoadPlugin swap
+ LoadPlugin write_prometheus
+
+ LoadPlugin logfile
+ <Plugin logfile>
+ LogLevel info
+ File "/var/log/collectd.log"
+ Timestamp true
+ PrintSeverity false
+ </Plugin>
+ <Plugin "cpu">
+ Interval 5
+ ReportByState false
+ ReportByCpu false
+ </Plugin>
+
+ <Plugin "memory">
+ Interval 30
+ ValuesAbsolute false
+ ValuesPercentage true
+ </Plugin>
+
+ <Plugin "write_prometheus">
+ Port "{{ .Values.prometheus_port }}"
+ </Plugin>
+
+    # Last line (collectd requires a '\n' at the last line)
diff --git a/vnfs/DAaaS/deploy/day2_configs/collectd/replace_image.yaml b/vnfs/DAaaS/deploy/day2_configs/collectd/replace_image.yaml
new file mode 100644
index 00000000..44e66948
--- /dev/null
+++ b/vnfs/DAaaS/deploy/day2_configs/collectd/replace_image.yaml
@@ -0,0 +1,6 @@
+spec:
+ template:
+ spec:
+ containers:
+ - name: collectd
+ image: "opnfv/barometer-collectd:stable"
diff --git a/vnfs/DAaaS/deploy/day2_configs/prometheus/README.txt b/vnfs/DAaaS/deploy/day2_configs/prometheus/README.txt
new file mode 100644
index 00000000..99b63fac
--- /dev/null
+++ b/vnfs/DAaaS/deploy/day2_configs/prometheus/README.txt
@@ -0,0 +1,7 @@
+This day2 config adds a remote_write endpoint to the existing config. Since a merge patch (--type=merge) replaces the whole remoteRead/remoteWrite lists, add_remote_write.yaml spells out the m3db endpoints again together with the new day2 endpoint.
+
+It assumes the initial/day0 config was deployed in namespace "edge1" with helm release name "cp" (helm install -n cp collection/ --namespace=edge1)
+
+Run the command below to apply the day2 config:
+
+kubectl patch --namespace=edge1 prometheus cp-prometheus-prometheus -p "$(cat add_remote_write.yaml)" --type=merge
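+
+To confirm the patch took effect, the merged spec can be inspected with a standard jsonpath query:
+
+kubectl get --namespace=edge1 prometheus cp-prometheus-prometheus -o jsonpath='{.spec.remoteWrite[*].url}'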
diff --git a/vnfs/DAaaS/deploy/day2_configs/prometheus/add_remote_write.yaml b/vnfs/DAaaS/deploy/day2_configs/prometheus/add_remote_write.yaml
new file mode 100644
index 00000000..48af9017
--- /dev/null
+++ b/vnfs/DAaaS/deploy/day2_configs/prometheus/add_remote_write.yaml
@@ -0,0 +1,13 @@
+spec:
+ remoteRead:
+ - url: "http://m3coordinator-m3db.m3db-operator.svc.cluster.local:7201/api/v1/prom/remote/read"
+ readRecent: true
+ remoteWrite:
+ - url: "http://m3coordinator-m3db.m3db-operator.svc.cluster.local:7201/api/v1/prom/remote/write"
+ writeRelabelConfigs:
+ - targetLabel: metrics_storage
+ replacement: m3db_remote
+ - url: "http://m3coordinator-m3db-cluster.edge1.svc.cluster.local:7201/api/v1/prom/remote/write"
+ writeRelabelConfigs:
+ - targetLabel: metrics_storage_day2
+ replacement: m3db_remote_day2
diff --git a/vnfs/DAaaS/deploy/inference-core/.helmignore b/vnfs/DAaaS/deploy/inference-core/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/deploy/inference-core/Chart.yaml b/vnfs/DAaaS/deploy/inference-core/Chart.yaml
new file mode 100644
index 00000000..836b1a84
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for inference framework components
+name: inference-core
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/.helmignore b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/Chart.yaml b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/Chart.yaml
new file mode 100644
index 00000000..2db1a483
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: Helm chart for Tensorflow serving model server
+name: tf-serving
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/NOTES.txt b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/NOTES.txt
new file mode 100644
index 00000000..2dcf639e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/NOTES.txt
@@ -0,0 +1,20 @@
+1. Get the tensorflow serving URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+ {{- range $.Values.ingress.paths }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }}
+ {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "tf-serving.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+  You can watch the status by running 'kubectl get svc -w {{ include "tf-serving.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "tf-serving.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export SVC_NAME=$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app={{ include "tf-serving.name" . }},modelName={{ .Values.modelName }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ kubectl port-forward svc/$SVC_NAME 8500
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/_helpers.tpl b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/_helpers.tpl
new file mode 100644
index 00000000..ce3ce917
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/_helpers.tpl
@@ -0,0 +1,41 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "tf-serving.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "tf-serving.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.modelName | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "tf-serving.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/* Create the name of tf-serving service account to use */}}
+{{- define "tf-serving.serviceAccountName" -}}
+{{- if and .Values.global.rbac .Values.serviceAccount.create -}}
+ {{ default (include "tf-serving.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/deployment.yaml b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/deployment.yaml
new file mode 100644
index 00000000..0a909e9f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/deployment.yaml
@@ -0,0 +1,138 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: {{ include "tf-serving.fullname" . }}
+ labels:
+ app: {{ include "tf-serving.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service | quote }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ include "tf-serving.name" . }}
+ release: {{ .Release.Name }}
+ modelName: {{ .Values.modelName }}
+ template:
+ metadata:
+ labels:
+ app: {{ include "tf-serving.name" . }}
+ release: {{ .Release.Name }}
+ modelName: {{ .Values.modelName }}
+ spec:
+ serviceAccountName: {{ template "tf-serving.serviceAccountName" . }}
+ containers:
+ - name: tf-serving
+ image: "{{ .Values.image.tensorflowServing.repository }}:{{ .Values.image.tensorflowServing.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - "/usr/bin/tensorflow_model_server"
+ args:
+ - "--port={{ .Values.service.tensorflowServingPort }}"
+ - "--model_name={{ .Values.modelName }}"
+ - "--model_base_path={{ .Values.modelBasePath }}"
+ ports:
+ - name: tf-serving
+ containerPort: {{ .Values.service.tensorflowServingPort }}
+ protocol: TCP
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ if .Values.minio.existingSecret }}{{ .Values.minio.existingSecret }}{{ else }}{{ template "tf-serving.fullname" . }}{{ end }}
+ key: accesskey
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: {{ if .Values.minio.existingSecret }}{{ .Values.minio.existingSecret }}{{ else }}{{ template "tf-serving.fullname" . }}{{ end }}
+ key: secretkey
+ {{- range $key, $val := .Values.minio.environment }}
+ - name: {{ $key }}
+ value: {{ $val | quote }}
+ {{- end}}
+ readinessProbe:
+ tcpSocket:
+ port: tf-serving
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ volumeMounts:
+ - mountPath: /models
+ name: models
+ - name: tensorboard
+ image: "{{ .Values.image.tensorboard.repository }}:{{ .Values.image.tensorboard.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - tensorboard
+ args:
+ - --logdir
+ - {{ .Values.modelBasePath }}
+ - --host
+ - 0.0.0.0
+ ports:
+ - name: tensorboard
+ containerPort: {{ .Values.service.tensorboardPort }}
+ protocol: TCP
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: {{ if .Values.minio.existingSecret }}{{ .Values.minio.existingSecret }}{{ else }}{{ template "tf-serving.fullname" . }}{{ end }}
+ key: accesskey
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: {{ if .Values.minio.existingSecret }}{{ .Values.minio.existingSecret }}{{ else }}{{ template "tf-serving.fullname" . }}{{ end }}
+ key: secretkey
+ {{- range $key, $val := .Values.minio.environment }}
+ - name: {{ $key }}
+ value: {{ $val | quote }}
+ {{- end}}
+ livenessProbe:
+ httpGet:
+ path: /
+ port: {{ .Values.service.tensorboardPort }}
+ readinessProbe:
+ httpGet:
+ path: /
+ port: {{ .Values.service.tensorboardPort }}
+ volumeMounts:
+ - mountPath: /output/training_logs
+ name: training-logs-volume
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ volumes:
+ - name: models
+ emptyDir: {}
+ - name: training-logs-volume
+ emptyDir: {}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/ingress.yaml b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/ingress.yaml
new file mode 100644
index 00000000..b02fc8bb
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/ingress.yaml
@@ -0,0 +1,55 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "tf-serving.fullname" . -}}
+{{- $ingressPaths := .Values.ingress.paths -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ app: {{ include "tf-serving.name" . }}
+ release: {{ .Release.Name }}
+ modelName: {{ .Values.modelName }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+{{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+{{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ . | quote }}
+ http:
+ paths:
+ {{- range $ingressPaths }}
+ - path: {{ . }}
+ backend:
+ serviceName: {{ $fullName }}
+ servicePort: http
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/secrets.yaml b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/secrets.yaml
new file mode 100644
index 00000000..f4b8fe89
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/secrets.yaml
@@ -0,0 +1,31 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if not .Values.minio.existingSecret }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "tf-serving.fullname" . }}
+ labels:
+ app: {{ include "tf-serving.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service | quote }}
+type: Opaque
+data:
+ accesskey: {{ .Values.minio.accessKey | b64enc }}
+ secretkey: {{ .Values.minio.secretKey | b64enc }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/service.yaml b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/service.yaml
new file mode 100644
index 00000000..3ddcca66
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/service.yaml
@@ -0,0 +1,39 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "tf-serving.fullname" . }}
+ labels:
+ app: {{ include "tf-serving.name" . }}
+ release: {{ .Release.Name }}
+ modelName: {{ .Values.modelName }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.tensorflowServingPort }}
+ targetPort: tf-serving
+ protocol: TCP
+ name: tf-serving
+ - port: {{ .Values.service.tensorboardPort }}
+ targetPort: tensorboard
+ protocol: TCP
+ name: tensorboard
+ selector:
+ app: {{ include "tf-serving.name" . }}
+ release: {{ .Release.Name }}
+ modelName: {{ .Values.modelName }}
diff --git a/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/serviceaccount.yaml b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/serviceaccount.yaml
new file mode 100644
index 00000000..af4987d8
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/templates/serviceaccount.yaml
@@ -0,0 +1,25 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "tf-serving.fullname" . }}
+ labels:
+ app: {{ include "tf-serving.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service | quote }}
diff --git a/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/values.yaml b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/values.yaml
new file mode 100644
index 00000000..ebf88683
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/charts/tf-serving/values.yaml
@@ -0,0 +1,71 @@
+# Default values for tf-serving.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ tensorflowServing:
+ repository: tensorflow/serving
+ tag: latest
+ tensorboard:
+ repository: tensorflow/tensorflow
+ tag: latest
+ pullPolicy: IfNotPresent
+
+nameOverride: ""
+fullnameOverride: ""
+
+## Model information for tf-serving
+modelName: "mnist"
+modelBasePath: "s3://models/mnist"
+
+## Model repository information (Minio)
+minio:
+ existingSecret: ""
+ accessKey: "onapdaas"
+ secretKey: "onapsecretdaas"
+ environment:
+ AWS_REGION: ""
+ S3_REGION: ""
+ S3_ENDPOINT: "minio.minio.svc.cluster.local:9000"
+ AWS_ENDPOINT_URL: "http://minio.minio.svc.cluster.local:9000"
+ S3_USE_HTTPS: 0
+ S3_VERIFY_SSL: 0
+
+
+## Service account for tf-serving to use.
+serviceAccount:
+ create: true
+ name: ""
+
+service:
+ type: ClusterIP
+ tensorflowServingPort: 8500
+ tensorboardPort: 6006
+
+ingress:
+ enabled: false
+ annotations: {}
+ paths: []
+ hosts:
+ - chart-example.local
+ tls: []
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/vnfs/DAaaS/deploy/inference-core/values.yaml b/vnfs/DAaaS/deploy/inference-core/values.yaml
new file mode 100644
index 00000000..10714b88
--- /dev/null
+++ b/vnfs/DAaaS/deploy/inference-core/values.yaml
@@ -0,0 +1,30 @@
+# Copyright © 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ nodePortPrefix: 310
+ rbac: true
+ repository: nexus3.onap.org:10001
+ readinessRepository: oomk8s
+ readinessImage: readiness-check:2.0.0
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+
+#################################################################
+# k8s Operator Day-0 configuration defaults.
+#################################################################
+
diff --git a/vnfs/DAaaS/deploy/messaging/.helmignore b/vnfs/DAaaS/deploy/messaging/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/deploy/messaging/Chart.yaml b/vnfs/DAaaS/deploy/messaging/Chart.yaml
new file mode 100644
index 00000000..a2e468f8
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: Helm chart for messaging and data distribution components
+name: messaging
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/messaging/charts/kafka/.helmignore b/vnfs/DAaaS/deploy/messaging/charts/kafka/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/kafka/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/deploy/messaging/charts/kafka/Chart.yaml b/vnfs/DAaaS/deploy/messaging/charts/kafka/Chart.yaml
new file mode 100644
index 00000000..b0558bb8
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/kafka/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: Helm chart for kafka operator
+name: kafka
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/messaging/charts/kafka/templates/kafka-cluster.yaml b/vnfs/DAaaS/deploy/messaging/charts/kafka/templates/kafka-cluster.yaml
new file mode 100644
index 00000000..cf71fca7
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/kafka/templates/kafka-cluster.yaml
@@ -0,0 +1,29 @@
+apiVersion: kafka.strimzi.io/v1alpha1
+kind: Kafka
+metadata:
+ name: {{ .Values.cluster_name }}
+spec:
+ kafka:
+ version: {{ .Values.version }}
+ replicas: {{ .Values.replicas }}
+ listeners:
+ plain: {}
+ tls: {}
+ config:
+ offsets.topic.replication.factor: {{ .Values.topic.replicationFactor }}
+ transaction.state.log.replication.factor: 3
+ transaction.state.log.min.isr: 2
+ log.message.format.version: "2.1"
+ storage:
+ type: persistent-claim
+ size: {{ .Values.storage }}
+ deleteClaim: false
+ zookeeper:
+ replicas: {{ .Values.zookeeper.replicas }}
+ storage:
+ type: persistent-claim
+ size: {{ .Values.zookeeper.storage }}
+ deleteClaim: false
+ entityOperator:
+ topicOperator: {}
+ userOperator: {}
diff --git a/vnfs/DAaaS/deploy/messaging/charts/kafka/values.yaml b/vnfs/DAaaS/deploy/messaging/charts/kafka/values.yaml
new file mode 100644
index 00000000..a5e615c9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/kafka/values.yaml
@@ -0,0 +1,30 @@
+# Default values for kafka.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+cluster_name: my-cluster
+version: 2.1.0
+replicas: 3
+topic:
+ replicationFactor: 3
+storage: 100Gi
+zookeeper:
+ replicas: 3
+ storage: 100Gi
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/.helmignore b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/Chart.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/Chart.yaml
new file mode 100644
index 00000000..64915c66
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+appVersion: "0.1.0"
+description: "Strimzi: Kafka as a Service"
+name: strimzi-kafka-operator
+version: 0.1.0
+icon: https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/master/documentation/logo/strimzi_logo.png
+keywords:
+- kafka
+- queue
+- stream
+- event
+- messaging
+- datastore
+- topic
+home: http://strimzi.io/
+sources:
+- https://github.com/strimzi/strimzi-kafka-operator
+maintainers:
+- name: ppatierno
+- name: scholzj
+- name: tombentley \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/OWNERS b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/OWNERS
new file mode 100644
index 00000000..2aeed6e5
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/OWNERS
@@ -0,0 +1,8 @@
+approvers:
+- ppatierno
+- scholzj
+- tombentley
+reviewers:
+- ppatierno
+- scholzj
+- tombentley \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/README.md b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/README.md
new file mode 100644
index 00000000..35226be0
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/README.md
@@ -0,0 +1,105 @@
+# Strimzi: Kafka as a Service
+
+Strimzi provides a way to run an [Apache Kafka](https://kafka.apache.org/) cluster on
+[Kubernetes](https://kubernetes.io/) or [OpenShift](https://www.openshift.com/) in various deployment configurations.
+See our [website](https://github.com/strimzi/strimzi-kafka-operator) for more details about the project.
+
+## Introduction
+
+This chart bootstraps the Strimzi Cluster Operator Deployment, Cluster Roles, Cluster Role Bindings, Service Accounts, and
+Custom Resource Definitions for running [Apache Kafka](https://kafka.apache.org/) on a [Kubernetes](http://kubernetes.io)
+cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Kubernetes 1.9+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+Add the Strimzi Helm Chart repository:
+
+```bash
+$ helm repo add strimzi http://strimzi.io/charts/
+```
+
+To install the chart with the release name `my-release`:
+
+```bash
+$ helm install --name my-release strimzi/strimzi-kafka-operator
+```
+
+The command deploys the Strimzi Cluster Operator on the Kubernetes cluster with the default configuration.
+The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```bash
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the operator and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the Strimzi chart and their default values. Runtime
+configuration of Kafka and other components is defined within their respective custom resources. See
+the documentation for more details.
+
+| Parameter | Description | Default |
+| ------------------------------------ | ----------------------------------------- | ---------------------------------------------------- |
+| `watchNamespaces` | Comma separated list of additional namespaces for the strimzi-operator to watch | [] |
+| `image.repository` | Cluster Operator image repository | `strimzi` |
+| `image.name` | Cluster Operator image name | `cluster-operator` |
+| `image.tag` | Cluster Operator image tag | `0.11.0` |
+| `image.imagePullPolicy` | Cluster Operator image pull policy | `IfNotPresent` |
+| `logLevel` | Cluster Operator log level | `INFO` |
+| `fullReconciliationIntervalMs` | Full reconciliation interval in milliseconds | 120000 |
+| `operationTimeoutMs` | Operation timeout in milliseconds | 300000 |
+| `zookeeper.image.repository` | ZooKeeper image repository | `strimzi` |
+| `zookeeper.image.name` | ZooKeeper image name | `zookeeper` |
+| `zookeeper.image.tag` | ZooKeeper image tag | `0.11.0` |
+| `kafka.image.repository` | Kafka image repository | `strimzi` |
+| `kafka.image.name` | Kafka image name | `kafka` |
+| `kafka.image.tagPrefix` | Kafka image tag prefix | `0.11.0` |
+| `kafkaConnect.image.repository` | Kafka Connect image repository | `strimzi` |
+| `kafkaConnect.image.name` | Kafka Connect image name | `kafka-connect` |
+| `kafkaConnect.image.tagPrefix` | Kafka Connect image tag prefix | `0.11.0` |
+| `kafkaConnects2i.image.repository` | Kafka Connect s2i image repository | `strimzi` |
+| `kafkaConnects2i.image.name` | Kafka Connect s2i image name | `kafka-connect-s2i` |
+| `kafkaConnects2i.image.tagPrefix` | Kafka Connect s2i image tag prefix | `0.11.0` |
+| `kafkaMirrorMaker.image.repository` | Kafka Mirror Maker image repository | `strimzi` |
+| `kafkaMirrorMaker.image.name` | Kafka Mirror Maker image name | `kafka` |
+| `kafkaMirrorMaker.image.tagPrefix` | Kafka Mirror Maker image tag prefix | `0.11.0` |
+| `topicOperator.image.repository` | Topic Operator image repository | `strimzi` |
+| `topicOperator.image.name` | Topic Operator s2i image name | `topic-operator` |
+| `topicOperator.image.tag` | Topic Operator s2i image tag | `0.11.0` |
+| `kafkaInit.image.repository` | Init Kafka image repository | `strimzi` |
+| `kafkaInit.image.name` | Init Kafka image name | `kafka-init` |
+| `kafkaInit.image.tag` | Init Kafka image tag | `0.11.0` |
+| `tlsSidecarZookeeper.image.repository` | TLS Sidecar for ZooKeeper image repository | `strimzi` |
+| `tlsSidecarZookeeper.image.name` | TLS Sidecar for ZooKeeper image name | `zookeeper-stunnel` |
+| `tlsSidecarZookeeper.image.tag` | TLS Sidecar for ZooKeeper image tag | `0.11.0` |
+| `tlsSidecarKafka.image.repository` | TLS Sidecar for Kafka image repository | `strimzi` |
+| `tlsSidecarKafka.image.name` | TLS Sidecar for Kafka image name | `kafka-stunnel` |
+| `tlsSidecarKafka.image.tag` | TLS Sidecar for Kafka image tag | `0.11.0` |
+| `tlsSidecarTopicOperator.image.repository` | TLS Sidecar for Topic Operator image repository | `strimzi` |
+| `tlsSidecarTopicOperator.image.name` | TLS Sidecar for Topic Operator image name | `topic-operator-stunnel` |
+| `tlsSidecarTopicOperator.image.tag` | TLS Sidecar for Topic Operator image tag | `0.11.0` |
+| `resources.limits.memory` | Memory constraint for limits | `256Mi` |
+| `resources.limits.cpu` | CPU constraint for limits | `1000m` |
+| `resources.requests.memory` | Memory constraint for requests | `256Mi` |
+| `livenessProbe.initialDelaySeconds` | Liveness probe initial delay in seconds | 10 |
+| `livenessProbe.periodSeconds` | Liveness probe period in seconds | 30 |
+| `readinessProbe.initialDelaySeconds` | Readiness probe initial delay in seconds | 10 |
+| `readinessProbe.periodSeconds` | Readiness probe period in seconds | 30 |
+| `imageRepositoryOverride` | Override all image repository config | `nil` |
+| `imageTagOverride` | Override all image tag config | `nil` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install --name my-release --set logLevel=DEBUG,fullReconciliationIntervalMs=240000 strimzi/strimzi-kafka-operator
+```
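+
+List-valued parameters such as `watchNamespaces` can also be passed on the command line; curly braces tell `--set` to treat the value as a list (the namespace names below are placeholders):
+
+```bash
+$ helm install --name my-release --set watchNamespaces="{kafka-ns-1,kafka-ns-2}" strimzi/strimzi-kafka-operator
+```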
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/010-ServiceAccount-strimzi-cluster-operator.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/010-ServiceAccount-strimzi-cluster-operator.yaml
new file mode 100644
index 00000000..45367ad2
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/010-ServiceAccount-strimzi-cluster-operator.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: strimzi-cluster-operator
+ labels:
+ app: {{ template "strimzi.name" . }}
+ chart: {{ template "strimzi.chart" . }}
+ component: service-account
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/020-ClusterRole-strimzi-cluster-operator-role.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/020-ClusterRole-strimzi-cluster-operator-role.yaml
new file mode 100644
index 00000000..12663c74
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/020-ClusterRole-strimzi-cluster-operator-role.yaml
@@ -0,0 +1,259 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: strimzi-cluster-operator-namespaced
+ labels:
+ app: {{ template "strimzi.name" . }}
+ chart: {{ template "strimzi.chart" . }}
+ component: role
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - get
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - "rbac.authorization.k8s.io"
+ resources:
+ - rolebindings
+ verbs:
+ - get
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - "kafka.strimzi.io"
+ resources:
+ - kafkas
+ - kafkaconnects
+ - kafkaconnects2is
+ - kafkamirrormakers
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - "extensions"
+ resources:
+ - deployments
+ - deployments/scale
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - "apps"
+ resources:
+ - deployments
+ - deployments/scale
+ - deployments/status
+ - statefulsets
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+# OpenShift S2I requirements
+- apiGroups:
+ - "extensions"
+ resources:
+ - replicationcontrollers
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - apps.openshift.io
+ resources:
+ - deploymentconfigs
+ - deploymentconfigs/scale
+ - deploymentconfigs/status
+ - deploymentconfigs/finalizers
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - build.openshift.io
+ resources:
+ - buildconfigs
+ - builds
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+ - update
+- apiGroups:
+ - image.openshift.io
+ resources:
+ - imagestreams
+ - imagestreams/status
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - watch
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - replicationcontrollers
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - extensions
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - route.openshift.io
+ resources:
+ - routes
+ - routes/custom-host
+ verbs:
+ - get
+ - list
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/020-RoleBinding-strimzi-cluster-operator.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/020-RoleBinding-strimzi-cluster-operator.yaml
new file mode 100644
index 00000000..647774b5
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/020-RoleBinding-strimzi-cluster-operator.yaml
@@ -0,0 +1,25 @@
+{{- $root := . -}}
+{{- range append .Values.watchNamespaces .Release.Namespace }}
+{{- if ne . "*" -}}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+ name: strimzi-cluster-operator
+ namespace: {{ . }}
+ labels:
+ app: {{ template "strimzi.name" $root }}
+ chart: {{ template "strimzi.chart" $root }}
+ component: role-binding
+ release: {{ $root.Release.Name }}
+ heritage: {{ $root.Release.Service }}
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: {{ $root.Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: strimzi-cluster-operator-namespaced
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
+{{- end }}
\ No newline at end of file
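The template above renders one RoleBinding per entry in .Values.watchNamespaces plus the release namespace, skipping the "*" wildcard. A values sketch that would make the operator watch two extra namespaces (the namespace names are illustrative assumptions, not values taken from this chart):

# values.yaml (sketch)
watchNamespaces:
  - kafka-apps        # illustrative extra namespace to watch
  - analytics-apps    # illustrative extra namespace to watch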
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/021-ClusterRole-strimzi-cluster-operator-role.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/021-ClusterRole-strimzi-cluster-operator-role.yaml
new file mode 100644
index 00000000..da7f2cda
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/021-ClusterRole-strimzi-cluster-operator-role.yaml
@@ -0,0 +1,21 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: strimzi-cluster-operator-global
+ labels:
+ app: {{ template "strimzi.name" . }}
+ chart: {{ template "strimzi.chart" . }}
+ component: role
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+rules:
+- apiGroups:
+ - "rbac.authorization.k8s.io"
+ resources:
+ - clusterrolebindings
+ verbs:
+ - get
+ - create
+ - delete
+ - patch
+ - update
\ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/021-ClusterRoleBinding-strimzi-cluster-operator.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/021-ClusterRoleBinding-strimzi-cluster-operator.yaml
new file mode 100644
index 00000000..5e32c34c
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/021-ClusterRoleBinding-strimzi-cluster-operator.yaml
@@ -0,0 +1,18 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: strimzi-cluster-operator
+ labels:
+ app: {{ template "strimzi.name" . }}
+ chart: {{ template "strimzi.chart" . }}
+ component: role-binding
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: strimzi-cluster-operator-global
+ apiGroup: rbac.authorization.k8s.io
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/030-ClusterRole-strimzi-kafka-broker.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/030-ClusterRole-strimzi-kafka-broker.yaml
new file mode 100644
index 00000000..7ef0faf2
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/030-ClusterRole-strimzi-kafka-broker.yaml
@@ -0,0 +1,17 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: strimzi-kafka-broker
+ labels:
+ app: {{ template "strimzi.name" . }}
+ chart: {{ template "strimzi.chart" . }}
+ component: broker-role
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/030-ClusterRoleBinding-strimzi-cluster-operator-kafka-broker-delegation.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/030-ClusterRoleBinding-strimzi-cluster-operator-kafka-broker-delegation.yaml
new file mode 100644
index 00000000..744238c9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/030-ClusterRoleBinding-strimzi-cluster-operator-kafka-broker-delegation.yaml
@@ -0,0 +1,18 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: strimzi-cluster-operator-kafka-broker-delegation
+ labels:
+ app: {{ template "strimzi.name" . }}
+ chart: {{ template "strimzi.chart" . }}
+ component: broker-role-binding
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: strimzi-kafka-broker
+ apiGroup: rbac.authorization.k8s.io
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/031-ClusterRole-strimzi-entity-operator.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/031-ClusterRole-strimzi-entity-operator.yaml
new file mode 100644
index 00000000..d6d6453a
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/031-ClusterRole-strimzi-entity-operator.yaml
@@ -0,0 +1,52 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: strimzi-entity-operator
+ labels:
+ app: {{ template "strimzi.name" . }}
+ chart: {{ template "strimzi.chart" . }}
+ component: entity-operator-role
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+rules:
+- apiGroups:
+ - "kafka.strimzi.io"
+ resources:
+ - kafkatopics
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - patch
+ - update
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+- apiGroups:
+ - "kafka.strimzi.io"
+ resources:
+ - kafkausers
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - patch
+ - update
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ - create
+ - patch
+ - update
+ - delete
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml
new file mode 100644
index 00000000..20b163de
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml
@@ -0,0 +1,25 @@
+{{- $root := . -}}
+{{- range append .Values.watchNamespaces .Release.Namespace }}
+{{- if ne . "*" -}}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+ name: strimzi-cluster-operator-entity-operator-delegation
+ namespace: {{ . }}
+ labels:
+ app: {{ template "strimzi.name" $root }}
+ chart: {{ template "strimzi.chart" $root }}
+ component: entity-operator-role-binding
+ release: {{ $root.Release.Name }}
+ heritage: {{ $root.Release.Service }}
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: {{ $root.Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: strimzi-entity-operator
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/032-ClusterRole-strimzi-topic-operator.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/032-ClusterRole-strimzi-topic-operator.yaml
new file mode 100644
index 00000000..4d470e40
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/032-ClusterRole-strimzi-topic-operator.yaml
@@ -0,0 +1,29 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: strimzi-topic-operator
+ labels:
+ app: {{ template "strimzi.name" . }}
+ chart: {{ template "strimzi.chart" . }}
+ component: topic-operator-role
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+rules:
+- apiGroups:
+ - "kafka.strimzi.io"
+ resources:
+ - kafkatopics
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - patch
+ - update
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
\ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/032-RoleBinding-strimzi-cluster-operator-topic-operator-delegation.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/032-RoleBinding-strimzi-cluster-operator-topic-operator-delegation.yaml
new file mode 100644
index 00000000..29f3d36b
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/032-RoleBinding-strimzi-cluster-operator-topic-operator-delegation.yaml
@@ -0,0 +1,25 @@
+{{- $root := . -}}
+{{- range append .Values.watchNamespaces .Release.Namespace }}
+{{- if ne . "*" -}}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+ name: strimzi-cluster-operator-topic-operator-delegation
+ namespace: {{ . }}
+ labels:
+ app: {{ template "strimzi.name" $root }}
+ chart: {{ template "strimzi.chart" $root }}
+ component: topic-operator-role-binding
+ release: {{ $root.Release.Name }}
+ heritage: {{ $root.Release.Service }}
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: {{ $root.Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: strimzi-topic-operator
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/040-Crd-kafka.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/040-Crd-kafka.yaml
new file mode 100644
index 00000000..d2e72949
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/040-Crd-kafka.yaml
@@ -0,0 +1,2123 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkas.kafka.strimzi.io
+ labels:
+ app: '{{ template "strimzi.name" . }}'
+ chart: '{{ template "strimzi.chart" . }}'
+ component: kafkas.kafka.strimzi.io-crd
+ release: '{{ .Release.Name }}'
+ heritage: '{{ .Release.Service }}'
+ annotations:
+ "helm.sh/hook": crd-install
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ group: kafka.strimzi.io
+ version: v1alpha1
+ scope: Namespaced
+ names:
+ kind: Kafka
+ listKind: KafkaList
+ singular: kafka
+ plural: kafkas
+ shortNames:
+ - k
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ type: object
+ properties:
+ kafka:
+ type: object
+ properties:
+ replicas:
+ type: integer
+ minimum: 1
+ image:
+ type: string
+ storage:
+ type: object
+ properties:
+ class:
+ type: string
+ deleteClaim:
+ type: boolean
+ id:
+ type: integer
+ minimum: 0
+ selector:
+ type: object
+ size:
+ type: string
+ type:
+ type: string
+ enum:
+ - ephemeral
+ - persistent-claim
+ - jbod
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ class:
+ type: string
+ deleteClaim:
+ type: boolean
+ id:
+ type: integer
+ minimum: 0
+ selector:
+ type: object
+ size:
+ type: string
+ type:
+ type: string
+ enum:
+ - ephemeral
+ - persistent-claim
+ required:
+ - type
+ required:
+ - type
+ listeners:
+ type: object
+ properties:
+ plain:
+ type: object
+ properties:
+ authentication:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-512
+ required:
+ - type
+ networkPolicyPeers:
+ type: array
+ items:
+ type: object
+ properties:
+ ipBlock:
+ type: object
+ properties:
+ cidr:
+ type: string
+ except:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ podSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ tls:
+ type: object
+ properties:
+ authentication:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-512
+ required:
+ - type
+ networkPolicyPeers:
+ type: array
+ items:
+ type: object
+ properties:
+ ipBlock:
+ type: object
+ properties:
+ cidr:
+ type: string
+ except:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ podSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ external:
+ type: object
+ properties:
+ authentication:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-512
+ required:
+ - type
+ networkPolicyPeers:
+ type: array
+ items:
+ type: object
+ properties:
+ ipBlock:
+ type: object
+ properties:
+ cidr:
+ type: string
+ except:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ podSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ overrides:
+ type: object
+ properties:
+ bootstrap:
+ type: object
+ properties:
+ address:
+ type: string
+ nodePort:
+ type: integer
+ brokers:
+ type: array
+ items:
+ type: object
+ properties:
+ broker:
+ type: integer
+ advertisedHost:
+ type: string
+ advertisedPort:
+ type: integer
+ nodePort:
+ type: integer
+ tls:
+ type: boolean
+ type:
+ type: string
+ enum:
+ - route
+ - loadbalancer
+ - nodeport
+ required:
+ - type
+ authorization:
+ type: object
+ properties:
+ superUsers:
+ type: array
+ items:
+ type: string
+ type:
+ type: string
+ enum:
+ - simple
+ required:
+ - type
+ config:
+ type: object
+ rack:
+ type: object
+ properties:
+ topologyKey:
+ type: string
+ example: failure-domain.beta.kubernetes.io/zone
+ required:
+ - topologyKey
+ brokerRackInitImage:
+ type: string
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ jvmOptions:
+ type: object
+ properties:
+ -XX:
+ type: object
+ -Xms:
+ type: string
+ pattern: '[0-9]+[mMgG]?'
+ -Xmx:
+ type: string
+ pattern: '[0-9]+[mMgG]?'
+ gcLoggingEnabled:
+ type: boolean
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ metrics:
+ type: object
+ logging:
+ type: object
+ properties:
+ loggers:
+ type: object
+ name:
+ type: string
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ required:
+ - type
+ tlsSidecar:
+ type: object
+ properties:
+ image:
+ type: string
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ logLevel:
+ type: string
+ enum:
+ - emerg
+ - alert
+ - crit
+ - err
+ - warning
+ - notice
+ - info
+ - debug
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ template:
+ type: object
+ properties:
+ statefulset:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ pod:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ imagePullSecrets:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ securityContext:
+ type: object
+ properties:
+ fsGroup:
+ type: integer
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ bootstrapService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ brokersService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ externalBootstrapRoute:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ externalBootstrapService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ perPodRoute:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ perPodService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ podDisruptionBudget:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ maxUnavailable:
+ type: integer
+ minimum: 0
+ version:
+ type: string
+ required:
+ - replicas
+ - storage
+ - listeners
+ zookeeper:
+ type: object
+ properties:
+ replicas:
+ type: integer
+ minimum: 1
+ image:
+ type: string
+ storage:
+ type: object
+ properties:
+ class:
+ type: string
+ deleteClaim:
+ type: boolean
+ id:
+ type: integer
+ minimum: 0
+ selector:
+ type: object
+ size:
+ type: string
+ type:
+ type: string
+ enum:
+ - ephemeral
+ - persistent-claim
+ required:
+ - type
+ config:
+ type: object
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ jvmOptions:
+ type: object
+ properties:
+ -XX:
+ type: object
+ -Xms:
+ type: string
+ pattern: '[0-9]+[mMgG]?'
+ -Xmx:
+ type: string
+ pattern: '[0-9]+[mMgG]?'
+ gcLoggingEnabled:
+ type: boolean
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ metrics:
+ type: object
+ logging:
+ type: object
+ properties:
+ loggers:
+ type: object
+ name:
+ type: string
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ required:
+ - type
+ tlsSidecar:
+ type: object
+ properties:
+ image:
+ type: string
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ logLevel:
+ type: string
+ enum:
+ - emerg
+ - alert
+ - crit
+ - err
+ - warning
+ - notice
+ - info
+ - debug
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ template:
+ type: object
+ properties:
+ statefulset:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ pod:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ imagePullSecrets:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ securityContext:
+ type: object
+ properties:
+ fsGroup:
+ type: integer
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ clientService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ nodesService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ podDisruptionBudget:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ maxUnavailable:
+ type: integer
+ minimum: 0
+ required:
+ - replicas
+ - storage
+ topicOperator:
+ type: object
+ properties:
+ watchedNamespace:
+ type: string
+ image:
+ type: string
+ reconciliationIntervalSeconds:
+ type: integer
+ minimum: 0
+ zookeeperSessionTimeoutSeconds:
+ type: integer
+ minimum: 0
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ topicMetadataMaxAttempts:
+ type: integer
+ minimum: 0
+ tlsSidecar:
+ type: object
+ properties:
+ image:
+ type: string
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ logLevel:
+ type: string
+ enum:
+ - emerg
+ - alert
+ - crit
+ - err
+ - warning
+ - notice
+ - info
+ - debug
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ logging:
+ type: object
+ properties:
+ loggers:
+ type: object
+ name:
+ type: string
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ required:
+ - type
+ jvmOptions:
+ type: object
+ properties:
+ gcLoggingEnabled:
+ type: boolean
+ entityOperator:
+ type: object
+ properties:
+ topicOperator:
+ type: object
+ properties:
+ watchedNamespace:
+ type: string
+ image:
+ type: string
+ reconciliationIntervalSeconds:
+ type: integer
+ minimum: 0
+ zookeeperSessionTimeoutSeconds:
+ type: integer
+ minimum: 0
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ topicMetadataMaxAttempts:
+ type: integer
+ minimum: 0
+ logging:
+ type: object
+ properties:
+ loggers:
+ type: object
+ name:
+ type: string
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ required:
+ - type
+ jvmOptions:
+ type: object
+ properties:
+ gcLoggingEnabled:
+ type: boolean
+ userOperator:
+ type: object
+ properties:
+ watchedNamespace:
+ type: string
+ image:
+ type: string
+ reconciliationIntervalSeconds:
+ type: integer
+ minimum: 0
+ zookeeperSessionTimeoutSeconds:
+ type: integer
+ minimum: 0
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ logging:
+ type: object
+ properties:
+ loggers:
+ type: object
+ name:
+ type: string
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ required:
+ - type
+ jvmOptions:
+ type: object
+ properties:
+ gcLoggingEnabled:
+ type: boolean
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ tlsSidecar:
+ type: object
+ properties:
+ image:
+ type: string
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ logLevel:
+ type: string
+ enum:
+ - emerg
+ - alert
+ - crit
+ - err
+ - warning
+ - notice
+ - info
+ - debug
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ template:
+ type: object
+ properties:
+ deployment:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ pod:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ imagePullSecrets:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ securityContext:
+ type: object
+ properties:
+ fsGroup:
+ type: integer
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ clusterCa:
+ type: object
+ properties:
+ generateCertificateAuthority:
+ type: boolean
+ validityDays:
+ type: integer
+ minimum: 1
+ renewalDays:
+ type: integer
+ minimum: 1
+ certificateExpirationPolicy:
+ type: string
+ enum:
+ - renew-certificate
+ - replace-key
+ clientsCa:
+ type: object
+ properties:
+ generateCertificateAuthority:
+ type: boolean
+ validityDays:
+ type: integer
+ minimum: 1
+ renewalDays:
+ type: integer
+ minimum: 1
+ certificateExpirationPolicy:
+ type: string
+ enum:
+ - renew-certificate
+ - replace-key
+ maintenanceTimeWindows:
+ type: array
+ items:
+ type: string
+ required:
+ - kafka
+ - zookeeper
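Per the validation schema above, spec.kafka must set replicas, storage and listeners, spec.zookeeper must set replicas and storage, and both sections are required. A minimal Kafka resource that would satisfy this CRD might look like the following sketch; the name, replica counts and ephemeral storage are illustrative assumptions:

apiVersion: kafka.strimzi.io/v1alpha1
kind: Kafka
metadata:
  name: my-cluster                # illustrative name
spec:
  kafka:
    replicas: 3                   # assumed sizing
    listeners:
      plain: {}                   # plaintext listener
      tls: {}                     # TLS listener
    storage:
      type: ephemeral             # emptyDir-backed, suitable for test environments
  zookeeper:
    replicas: 3
    storage:
      type: ephemeral
  entityOperator:
    topicOperator: {}             # reconciles KafkaTopic resources
    userOperator: {}              # reconciles KafkaUser resources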
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/041-Crd-kafkaconnect.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/041-Crd-kafkaconnect.yaml
new file mode 100644
index 00000000..b08ac001
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/041-Crd-kafkaconnect.yaml
@@ -0,0 +1,559 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkaconnects.kafka.strimzi.io
+ labels:
+ app: '{{ template "strimzi.name" . }}'
+ chart: '{{ template "strimzi.chart" . }}'
+ component: kafkaconnects.kafka.strimzi.io-crd
+ release: '{{ .Release.Name }}'
+ heritage: '{{ .Release.Service }}'
+ annotations:
+ "helm.sh/hook": crd-install
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ group: kafka.strimzi.io
+ version: v1alpha1
+ scope: Namespaced
+ names:
+ kind: KafkaConnect
+ listKind: KafkaConnectList
+ singular: kafkaconnect
+ plural: kafkaconnects
+ shortNames:
+ - kc
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ type: object
+ properties:
+ replicas:
+ type: integer
+ image:
+ type: string
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ jvmOptions:
+ type: object
+ properties:
+ -XX:
+ type: object
+ -Xms:
+ type: string
+ pattern: '[0-9]+[mMgG]?'
+ -Xmx:
+ type: string
+ pattern: '[0-9]+[mMgG]?'
+ gcLoggingEnabled:
+ type: boolean
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ logging:
+ type: object
+ properties:
+ loggers:
+ type: object
+ name:
+ type: string
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ required:
+ - type
+ metrics:
+ type: object
+ template:
+ type: object
+ properties:
+ deployment:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ pod:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ imagePullSecrets:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ securityContext:
+ type: object
+ properties:
+ fsGroup:
+ type: integer
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ apiService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ podDisruptionBudget:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ maxUnavailable:
+ type: integer
+ minimum: 0
+ authentication:
+ type: object
+ properties:
+ certificateAndKey:
+ type: object
+ properties:
+ certificate:
+ type: string
+ key:
+ type: string
+ secretName:
+ type: string
+ required:
+ - certificate
+ - key
+ - secretName
+ passwordSecret:
+ type: object
+ properties:
+ password:
+ type: string
+ secretName:
+ type: string
+ required:
+ - password
+ - secretName
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-512
+ username:
+ type: string
+ required:
+ - type
+ bootstrapServers:
+ type: string
+ config:
+ type: object
+ externalConfiguration:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ secretKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - name
+ - valueFrom
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ name:
+ type: string
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ required:
+ - name
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ tls:
+ type: object
+ properties:
+ trustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ certificate:
+ type: string
+ secretName:
+ type: string
+ required:
+ - certificate
+ - secretName
+ required:
+ - trustedCertificates
+ version:
+ type: string
+ required:
+ - bootstrapServers
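Only spec.bootstrapServers is required by the schema above. A minimal KafkaConnect sketch; the bootstrap address assumes Strimzi's usual <cluster>-kafka-bootstrap service name and plain listener port:

apiVersion: kafka.strimzi.io/v1alpha1
kind: KafkaConnect
metadata:
  name: my-connect                                   # illustrative name
spec:
  replicas: 1
  bootstrapServers: my-cluster-kafka-bootstrap:9092  # assumed bootstrap service and port
  config:
    group.id: my-connect-cluster                     # illustrative Connect worker group id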
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/042-Crd-kafkaconnects2i.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/042-Crd-kafkaconnects2i.yaml
new file mode 100644
index 00000000..0b95c7ef
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/042-Crd-kafkaconnects2i.yaml
@@ -0,0 +1,561 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkaconnects2is.kafka.strimzi.io
+ labels:
+ app: '{{ template "strimzi.name" . }}'
+ chart: '{{ template "strimzi.chart" . }}'
+ component: kafkaconnects2is.kafka.strimzi.io-crd
+ release: '{{ .Release.Name }}'
+ heritage: '{{ .Release.Service }}'
+ annotations:
+ "helm.sh/hook": crd-install
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ group: kafka.strimzi.io
+ version: v1alpha1
+ scope: Namespaced
+ names:
+ kind: KafkaConnectS2I
+ listKind: KafkaConnectS2IList
+ singular: kafkaconnects2i
+ plural: kafkaconnects2is
+ shortNames:
+ - kcs2i
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ type: object
+ properties:
+ replicas:
+ type: integer
+ image:
+ type: string
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ timeoutSeconds:
+ type: integer
+ minimum: 0
+ jvmOptions:
+ type: object
+ properties:
+ -XX:
+ type: object
+ -Xms:
+ type: string
+ pattern: '[0-9]+[mMgG]?'
+ -Xmx:
+ type: string
+ pattern: '[0-9]+[mMgG]?'
+ gcLoggingEnabled:
+ type: boolean
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ logging:
+ type: object
+ properties:
+ loggers:
+ type: object
+ name:
+ type: string
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ required:
+ - type
+ metrics:
+ type: object
+ template:
+ type: object
+ properties:
+ deployment:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ pod:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ imagePullSecrets:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ securityContext:
+ type: object
+ properties:
+ fsGroup:
+ type: integer
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ apiService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ podDisruptionBudget:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ maxUnavailable:
+ type: integer
+ minimum: 0
+ authentication:
+ type: object
+ properties:
+ certificateAndKey:
+ type: object
+ properties:
+ certificate:
+ type: string
+ key:
+ type: string
+ secretName:
+ type: string
+ required:
+ - certificate
+ - key
+ - secretName
+ passwordSecret:
+ type: object
+ properties:
+ password:
+ type: string
+ secretName:
+ type: string
+ required:
+ - password
+ - secretName
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-512
+ username:
+ type: string
+ required:
+ - type
+ bootstrapServers:
+ type: string
+ config:
+ type: object
+ externalConfiguration:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ secretKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - name
+ - valueFrom
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ name:
+ type: string
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ required:
+ - name
+ insecureSourceRepository:
+ type: boolean
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ tls:
+ type: object
+ properties:
+ trustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ certificate:
+ type: string
+ secretName:
+ type: string
+ required:
+ - certificate
+ - secretName
+ required:
+ - trustedCertificates
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ version:
+ type: string
+ required:
+ - bootstrapServers
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/043-Crd-kafkatopic.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/043-Crd-kafkatopic.yaml
new file mode 100644
index 00000000..69bf8efa
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/043-Crd-kafkatopic.yaml
@@ -0,0 +1,44 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkatopics.kafka.strimzi.io
+ labels:
+ app: '{{ template "strimzi.name" . }}'
+ chart: '{{ template "strimzi.chart" . }}'
+ component: kafkatopics.kafka.strimzi.io-crd
+ release: '{{ .Release.Name }}'
+ heritage: '{{ .Release.Service }}'
+ annotations:
+ "helm.sh/hook": crd-install
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ group: kafka.strimzi.io
+ version: v1alpha1
+ scope: Namespaced
+ names:
+ kind: KafkaTopic
+ listKind: KafkaTopicList
+ singular: kafkatopic
+ plural: kafkatopics
+ shortNames:
+ - kt
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ type: object
+ properties:
+ partitions:
+ type: integer
+ minimum: 1
+ replicas:
+ type: integer
+ minimum: 1
+ maximum: 32767
+ config:
+ type: object
+ topicName:
+ type: string
+ required:
+ - partitions
+ - replicas
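For orientation, a minimal KafkaTopic resource that satisfies the schema above might look like the sketch below; the `strimzi.io/cluster` label value, the topic name, and the retention setting are illustrative assumptions rather than anything defined by this chart.

```yaml
# Sketch only: a KafkaTopic that validates against the openAPIV3Schema above.
apiVersion: kafka.strimzi.io/v1alpha1
kind: KafkaTopic
metadata:
  name: example-topic
  labels:
    strimzi.io/cluster: my-cluster   # assumed name of the target Kafka cluster
spec:
  partitions: 3            # required, minimum 1
  replicas: 2              # required, between 1 and 32767
  config:
    retention.ms: 604800000
```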
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/044-Crd-kafkauser.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/044-Crd-kafkauser.yaml
new file mode 100644
index 00000000..2c0bd552
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/044-Crd-kafkauser.yaml
@@ -0,0 +1,100 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkausers.kafka.strimzi.io
+ labels:
+ app: '{{ template "strimzi.name" . }}'
+ chart: '{{ template "strimzi.chart" . }}'
+ component: kafkausers.kafka.strimzi.io-crd
+ release: '{{ .Release.Name }}'
+ heritage: '{{ .Release.Service }}'
+ annotations:
+ "helm.sh/hook": crd-install
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ group: kafka.strimzi.io
+ version: v1alpha1
+ scope: Namespaced
+ names:
+ kind: KafkaUser
+ listKind: KafkaUserList
+ singular: kafkauser
+ plural: kafkausers
+ shortNames:
+ - ku
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ type: object
+ properties:
+ authentication:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-512
+ required:
+ - type
+ authorization:
+ type: object
+ properties:
+ acls:
+ type: array
+ items:
+ type: object
+ properties:
+ host:
+ type: string
+ operation:
+ type: string
+ enum:
+ - Read
+ - Write
+ - Create
+ - Delete
+ - Alter
+ - Describe
+ - ClusterAction
+ - AlterConfigs
+ - DescribeConfigs
+ - IdempotentWrite
+ - All
+ resource:
+ type: object
+ properties:
+ name:
+ type: string
+ patternType:
+ type: string
+ enum:
+ - literal
+ - prefix
+ type:
+ type: string
+ enum:
+ - topic
+ - group
+ - cluster
+ - transactionalId
+ required:
+ - type
+ type:
+ type: string
+ enum:
+ - allow
+ - deny
+ required:
+ - operation
+ - resource
+ type:
+ type: string
+ enum:
+ - simple
+ required:
+ - acls
+ - type
+ required:
+ - authentication
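As a companion sketch, a KafkaUser that exercises the authentication and authorization enums above could be written as follows; the user, cluster, and topic names are assumptions.

```yaml
# Sketch only: a KafkaUser with TLS authentication and one simple ACL.
apiVersion: kafka.strimzi.io/v1alpha1
kind: KafkaUser
metadata:
  name: example-user
  labels:
    strimzi.io/cluster: my-cluster   # assumed cluster name
spec:
  authentication:
    type: tls                        # required; tls or scram-sha-512
  authorization:
    type: simple                     # only "simple" is accepted by the schema
    acls:
      - operation: Read              # required; see the enum above
        type: allow
        resource:
          type: topic                # required
          name: example-topic
          patternType: literal
```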
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/045-Crd-kafkamirrormaker.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/045-Crd-kafkamirrormaker.yaml
new file mode 100644
index 00000000..eeefe116
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/045-Crd-kafkamirrormaker.yaml
@@ -0,0 +1,526 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkamirrormakers.kafka.strimzi.io
+ labels:
+ app: '{{ template "strimzi.name" . }}'
+ chart: '{{ template "strimzi.chart" . }}'
+ component: kafkamirrormakers.kafka.strimzi.io-crd
+ release: '{{ .Release.Name }}'
+ heritage: '{{ .Release.Service }}'
+ annotations:
+ "helm.sh/hook": crd-install
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ group: kafka.strimzi.io
+ version: v1alpha1
+ scope: Namespaced
+ names:
+ kind: KafkaMirrorMaker
+ listKind: KafkaMirrorMakerList
+ singular: kafkamirrormaker
+ plural: kafkamirrormakers
+ shortNames:
+ - kmm
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ type: object
+ properties:
+ replicas:
+ type: integer
+ minimum: 1
+ image:
+ type: string
+ whitelist:
+ type: string
+ consumer:
+ type: object
+ properties:
+ numStreams:
+ type: integer
+ minimum: 1
+ groupId:
+ type: string
+ bootstrapServers:
+ type: string
+ authentication:
+ type: object
+ properties:
+ certificateAndKey:
+ type: object
+ properties:
+ certificate:
+ type: string
+ key:
+ type: string
+ secretName:
+ type: string
+ required:
+ - certificate
+ - key
+ - secretName
+ passwordSecret:
+ type: object
+ properties:
+ password:
+ type: string
+ secretName:
+ type: string
+ required:
+ - password
+ - secretName
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-512
+ username:
+ type: string
+ required:
+ - type
+ config:
+ type: object
+ tls:
+ type: object
+ properties:
+ trustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ certificate:
+ type: string
+ secretName:
+ type: string
+ required:
+ - certificate
+ - secretName
+ required:
+ - trustedCertificates
+ required:
+ - groupId
+ - bootstrapServers
+ producer:
+ type: object
+ properties:
+ bootstrapServers:
+ type: string
+ authentication:
+ type: object
+ properties:
+ certificateAndKey:
+ type: object
+ properties:
+ certificate:
+ type: string
+ key:
+ type: string
+ secretName:
+ type: string
+ required:
+ - certificate
+ - key
+ - secretName
+ passwordSecret:
+ type: object
+ properties:
+ password:
+ type: string
+ secretName:
+ type: string
+ required:
+ - password
+ - secretName
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-512
+ username:
+ type: string
+ required:
+ - type
+ config:
+ type: object
+ tls:
+ type: object
+ properties:
+ trustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ certificate:
+ type: string
+ secretName:
+ type: string
+ required:
+ - certificate
+ - secretName
+ required:
+ - trustedCertificates
+ required:
+ - bootstrapServers
+ resources:
+ type: object
+ properties:
+ limits:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ requests:
+ type: object
+ properties:
+ cpu:
+ type: string
+ pattern: '[0-9]+m?$'
+ memory:
+ type: string
+ pattern: '[0-9]+([kKmMgGtTpPeE]i?)?$'
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ jvmOptions:
+ type: object
+ properties:
+ -XX:
+ type: object
+ -Xms:
+ type: string
+ pattern: '[0-9]+[mMgG]?'
+ -Xmx:
+ type: string
+ pattern: '[0-9]+[mMgG]?'
+ gcLoggingEnabled:
+ type: boolean
+ logging:
+ type: object
+ properties:
+ loggers:
+ type: object
+ name:
+ type: string
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ required:
+ - type
+ metrics:
+ type: object
+ template:
+ type: object
+ properties:
+ deployment:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ pod:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ imagePullSecrets:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ securityContext:
+ type: object
+ properties:
+ fsGroup:
+ type: integer
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ podDisruptionBudget:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ type: object
+ annotations:
+ type: object
+ maxUnavailable:
+ type: integer
+ minimum: 0
+ version:
+ type: string
+ required:
+ - replicas
+ - whitelist
+ - consumer
+ - producer
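A minimal KafkaMirrorMaker covering the required fields (`replicas`, `whitelist`, `consumer`, `producer`) might look like the sketch below; the bootstrap addresses and group id are assumptions.

```yaml
# Sketch only: the smallest KafkaMirrorMaker accepted by the schema above.
apiVersion: kafka.strimzi.io/v1alpha1
kind: KafkaMirrorMaker
metadata:
  name: example-mirror-maker
spec:
  replicas: 1
  whitelist: ".*"                                          # mirror every topic
  consumer:
    groupId: example-mirror-maker-group                    # required
    bootstrapServers: source-cluster-kafka-bootstrap:9092  # required; assumed address
  producer:
    bootstrapServers: target-cluster-kafka-bootstrap:9092  # required; assumed address
```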
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/050-Deployment-strimzi-cluster-operator.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/050-Deployment-strimzi-cluster-operator.yaml
new file mode 100644
index 00000000..2f9b570d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/050-Deployment-strimzi-cluster-operator.yaml
@@ -0,0 +1,74 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: strimzi-cluster-operator
+ labels:
+ app: {{ template "strimzi.name" . }}
+ chart: {{ template "strimzi.chart" . }}
+ component: deployment
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ name: strimzi-cluster-operator
+ spec:
+ serviceAccountName: strimzi-cluster-operator
+ containers:
+ - name: strimzi-cluster-operator
+ image: {{ default .Values.image.repository .Values.imageRepositoryOverride }}/{{ .Values.image.name }}:{{ default .Values.image.tag .Values.imageTagOverride }}
+ imagePullPolicy: {{ .Values.image.imagePullPolicy | quote }}
+ env:
+ - name: STRIMZI_NAMESPACE
+ {{- if .Values.watchNamespaces -}}
+ {{- $ns := .Values.watchNamespaces -}}
+ {{- if has "*" $ns }}
+ value: "*"
+ {{- else -}}
+ {{- $ns := append $ns .Release.Namespace }}
+ value: "{{ join "," $ns }}"
+ {{- end }}
+ {{- else }}
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- end }}
+ - name: STRIMZI_FULL_RECONCILIATION_INTERVAL_MS
+ value: {{ .Values.fullReconciliationIntervalMs | quote }}
+ - name: STRIMZI_OPERATION_TIMEOUT_MS
+ value: {{ .Values.operationTimeoutMs | quote }}
+ - name: STRIMZI_DEFAULT_ZOOKEEPER_IMAGE
+ value: "{{ default .Values.zookeeper.image.repository .Values.imageRepositoryOverride }}/{{ .Values.zookeeper.image.name }}:{{ default .Values.zookeeper.image.tag .Values.imageTagOverride }}-kafka-2.0.0"
+ {{- template "strimzi.kafka.image.map" . }}
+ - name: STRIMZI_DEFAULT_TOPIC_OPERATOR_IMAGE
+ value: "{{ default .Values.topicOperator.image.repository .Values.imageRepositoryOverride }}/{{ .Values.topicOperator.image.name }}:{{ default .Values.topicOperator.image.tag .Values.imageTagOverride }}"
+ - name: STRIMZI_DEFAULT_USER_OPERATOR_IMAGE
+ value: "{{ default .Values.userOperator.image.repository .Values.imageRepositoryOverride }}/{{ .Values.userOperator.image.name }}:{{ default .Values.userOperator.image.tag .Values.imageTagOverride }}"
+ - name: STRIMZI_DEFAULT_KAFKA_INIT_IMAGE
+ value: "{{ default .Values.kafkaInit.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafkaInit.image.name }}:{{ default .Values.kafkaInit.image.tag .Values.imageTagOverride }}"
+ - name: STRIMZI_DEFAULT_TLS_SIDECAR_ZOOKEEPER_IMAGE
+ value: "{{ default .Values.tlsSidecarZookeeper.image.repository .Values.imageRepositoryOverride }}/{{ .Values.tlsSidecarZookeeper.image.name }}:{{ default .Values.tlsSidecarZookeeper.image.tag .Values.imageTagOverride }}"
+ - name: STRIMZI_DEFAULT_TLS_SIDECAR_KAFKA_IMAGE
+ value: "{{ default .Values.tlsSidecarKafka.image.repository .Values.imageRepositoryOverride }}/{{ .Values.tlsSidecarKafka.image.name }}:{{ default .Values.tlsSidecarKafka.image.tag .Values.imageTagOverride }}"
+ - name: STRIMZI_DEFAULT_TLS_SIDECAR_ENTITY_OPERATOR_IMAGE
+ value: "{{ default .Values.tlsSidecarEntityOperator.image.repository .Values.imageRepositoryOverride }}/{{ .Values.tlsSidecarEntityOperator.image.name }}:{{ default .Values.tlsSidecarEntityOperator.image.tag .Values.imageTagOverride }}"
+ - name: STRIMZI_LOG_LEVEL
+ value: {{ .Values.logLevel | quote }}
+ livenessProbe:
+ httpGet:
+ path: /healthy
+ port: 8080
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8080
+ initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ strategy:
+ type: Recreate
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/NOTES.txt b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/NOTES.txt
new file mode 100644
index 00000000..b49a9787
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/NOTES.txt
@@ -0,0 +1,5 @@
+Thank you for installing {{ .Chart.Name }}-{{ .Chart.Version }}
+
+To create a Kafka cluster, refer to the following documentation.
+
+http://strimzi.io/docs/{{ .Chart.Version }}/#kafka-cluster-str
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/_helpers.tpl b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/_helpers.tpl
new file mode 100644
index 00000000..c2aac512
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/_helpers.tpl
@@ -0,0 +1,49 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "strimzi.name" -}}
+{{- default "strimzi" .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "strimzi.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "strimzi.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Generate a docker registry prefix or empty string.
+
+NOTE: Not currently being used. Is this useful?
+*/}}
+{{- define "dockerRegistryOverride" -}}
+{{- if .Values.dockerRegistryOverride -}}
+{{- printf "%s/" .Values.image.dockerRegistry -}}
+{{- else -}}
+{{- "" -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "imageRepositoryOverride" -}}
+{{- .Values.imageRepositoryOverride -}}
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/_kafka_image_map.tpl b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/_kafka_image_map.tpl
new file mode 100644
index 00000000..fa22632e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/templates/_kafka_image_map.tpl
@@ -0,0 +1,28 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/* This file is generated in helm-charts/Makefile */}}
+{{/* DO NOT EDIT BY HAND */}}
+
+{{/* Generate the kafka image map */}}
+{{- define "strimzi.kafka.image.map" }}
+ - name: STRIMZI_KAFKA_IMAGES
+ value: |
+ 2.0.0={{ default .Values.kafka.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafka.image.name }}:{{ default .Values.kafka.image.tagPrefix .Values.imageTagOverride }}-kafka-2.0.0
+ 2.0.1={{ default .Values.kafka.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafka.image.name }}:{{ default .Values.kafka.image.tagPrefix .Values.imageTagOverride }}-kafka-2.0.1
+ 2.1.0={{ default .Values.kafka.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafka.image.name }}:{{ default .Values.kafka.image.tagPrefix .Values.imageTagOverride }}-kafka-2.1.0
+ - name: STRIMZI_KAFKA_CONNECT_IMAGES
+ value: |
+ 2.0.0={{ default .Values.kafkaConnect.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafkaConnect.image.name }}:{{ default .Values.kafkaConnect.image.tagPrefix .Values.imageTagOverride }}-kafka-2.0.0
+ 2.0.1={{ default .Values.kafkaConnect.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafkaConnect.image.name }}:{{ default .Values.kafkaConnect.image.tagPrefix .Values.imageTagOverride }}-kafka-2.0.1
+ 2.1.0={{ default .Values.kafkaConnect.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafkaConnect.image.name }}:{{ default .Values.kafkaConnect.image.tagPrefix .Values.imageTagOverride }}-kafka-2.1.0
+ - name: STRIMZI_KAFKA_CONNECT_S2I_IMAGES
+ value: |
+ 2.0.0={{ default .Values.kafkaConnects2i.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafkaConnects2i.image.name }}:{{ default .Values.kafkaConnects2i.image.tagPrefix .Values.imageTagOverride }}-kafka-2.0.0
+ 2.0.1={{ default .Values.kafkaConnects2i.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafkaConnects2i.image.name }}:{{ default .Values.kafkaConnects2i.image.tagPrefix .Values.imageTagOverride }}-kafka-2.0.1
+ 2.1.0={{ default .Values.kafkaConnects2i.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafkaConnects2i.image.name }}:{{ default .Values.kafkaConnects2i.image.tagPrefix .Values.imageTagOverride }}-kafka-2.1.0
+ - name: STRIMZI_KAFKA_MIRROR_MAKER_IMAGES
+ value: |
+ 2.0.0={{ default .Values.kafkaMirrorMaker.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafkaMirrorMaker.image.name }}:{{ default .Values.kafkaMirrorMaker.image.tagPrefix .Values.imageTagOverride }}-kafka-2.0.0
+ 2.0.1={{ default .Values.kafkaMirrorMaker.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafkaMirrorMaker.image.name }}:{{ default .Values.kafkaMirrorMaker.image.tagPrefix .Values.imageTagOverride }}-kafka-2.0.1
+ 2.1.0={{ default .Values.kafkaMirrorMaker.image.repository .Values.imageRepositoryOverride }}/{{ .Values.kafkaMirrorMaker.image.name }}:{{ default .Values.kafkaMirrorMaker.image.tagPrefix .Values.imageTagOverride }}-kafka-2.1.0
+{{- end -}}
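With the default values.yaml shown in the next file and no overrides, the first entry of this helper renders roughly as the sketch below; the exact output depends on the values supplied at install time.

```yaml
# Approximate rendering of the STRIMZI_KAFKA_IMAGES entry with default values:
- name: STRIMZI_KAFKA_IMAGES
  value: |
    2.0.0=strimzi/kafka:0.11.0-kafka-2.0.0
    2.0.1=strimzi/kafka:0.11.0-kafka-2.0.1
    2.1.0=strimzi/kafka:0.11.0-kafka-2.1.0
```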
diff --git a/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/values.yaml b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/values.yaml
new file mode 100644
index 00000000..74a7c581
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/charts/strimzi-kafka-operator/values.yaml
@@ -0,0 +1,89 @@
+# Default values for strimzi-kafka-operator.
+
+watchNamespaces:
+ - "*"
+
+image:
+ repository: strimzi
+ name: cluster-operator
+ tag: 0.11.0
+ imagePullPolicy: IfNotPresent
+logLevel: INFO
+fullReconciliationIntervalMs: 120000
+operationTimeoutMs: 300000
+# Docker images that the operator uses to provision the various components of Strimzi. To use your own registry,
+# prefix the repository name with your registry URL.
+# Ex) repository: registry.xyzcorp.com/strimzi/zookeeper
+zookeeper:
+ image:
+ repository: strimzi
+ name: zookeeper
+ tag: 0.11.0
+kafka:
+ image:
+ repository: strimzi
+ name: kafka
+ tagPrefix: 0.11.0
+kafkaConnect:
+ image:
+ repository: strimzi
+ name: kafka-connect
+ tagPrefix: 0.11.0
+kafkaConnects2i:
+ image:
+ repository: strimzi
+ name: kafka-connect-s2i
+ tagPrefix: 0.11.0
+topicOperator:
+ image:
+ repository: strimzi
+ name: topic-operator
+ tag: 0.11.0
+userOperator:
+ image:
+ repository: strimzi
+ name: user-operator
+ tag: 0.11.0
+kafkaInit:
+ image:
+ repository: strimzi
+ name: kafka-init
+ tag: 0.11.0
+tlsSidecarZookeeper:
+ image:
+ repository: strimzi
+ name: zookeeper-stunnel
+ tag: 0.11.0
+tlsSidecarKafka:
+ image:
+ repository: strimzi
+ name: kafka-stunnel
+ tag: 0.11.0
+tlsSidecarEntityOperator:
+ image:
+ repository: strimzi
+ name: entity-operator-stunnel
+ tag: 0.11.0
+kafkaMirrorMaker:
+ image:
+ repository: strimzi
+ name: kafka-mirror-maker
+ tagPrefix: 0.11.0
+resources:
+ limits:
+ memory: 256Mi
+ cpu: 1000m
+ requests:
+ memory: 256Mi
+ cpu: 200m
+livenessProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+readinessProbe:
+ initialDelaySeconds: 10
+ periodSeconds: 30
+
+# Override the docker image repository used by all Strimzi images
+# imageRepositoryOverride: foobar
+# Override the docker image tag used by all Strimzi images
+# imageTagOverride: latest
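As a sketch of how these defaults are commonly overridden, a custom values file might restrict the watched namespaces and point every Strimzi image at a private registry; the registry URL and namespace names below are assumptions. When the list does not contain "*", the Deployment template above also appends the release namespace to STRIMZI_NAMESPACE.

```yaml
# my-values.yaml (illustrative): watch two namespaces and use a private registry.
watchNamespaces:
  - kafka                                                 # assumed namespace
  - messaging                                             # assumed namespace
imageRepositoryOverride: registry.xyzcorp.com/strimzi     # assumed registry prefix
imageTagOverride: 0.11.0
```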
diff --git a/vnfs/DAaaS/deploy/messaging/values.yaml b/vnfs/DAaaS/deploy/messaging/values.yaml
new file mode 100644
index 00000000..5872bdf9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/messaging/values.yaml
@@ -0,0 +1,29 @@
+# Copyright © 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ nodePortPrefix: 370
+ repository: nexus3.onap.org:10001
+ readinessRepository: oomk8s
+ readinessImage: readiness-check:2.0.0
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+
+#################################################################
+# k8s Operator Day-0 configuration defaults.
+#################################################################
+
diff --git a/vnfs/DAaaS/deploy/minio/.helmignore b/vnfs/DAaaS/deploy/minio/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/vnfs/DAaaS/deploy/minio/Chart.yaml b/vnfs/DAaaS/deploy/minio/Chart.yaml
new file mode 100755
index 00000000..aefd5629
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/Chart.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+description: Minio is a high performance distributed object storage server, designed for large-scale private cloud infrastructure.
+name: minio
+version: 2.4.6
+appVersion: RELEASE.2019-02-12T21-58-47Z
+keywords:
+- storage
+- object-storage
+- S3
+home: https://minio.io
+icon: https://www.minio.io/img/logo_160x160.png
+sources:
+- https://github.com/minio/minio
+maintainers:
+- name: Acaleph
+ email: hello@acale.ph
+- name: Minio
+ email: dev@minio.io
+- name: nitisht
+ email: nitish@min.io
+- name: wlan0
+ email: sid@min.io
diff --git a/vnfs/DAaaS/deploy/minio/README.md b/vnfs/DAaaS/deploy/minio/README.md
new file mode 100755
index 00000000..1b101647
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/README.md
@@ -0,0 +1,330 @@
+Minio
+=====
+
+[Minio](https://minio.io) is a distributed object storage service for high-performance, high-scale data infrastructures. It is a drop-in replacement for AWS S3 in your own environment. It uses erasure coding to provide highly resilient storage that can tolerate failures of up to n/2 nodes. It runs in cloud, container, Kubernetes, and bare-metal environments. It is simple enough to be deployed in seconds, and can scale to hundreds of petabytes. Minio is suitable for storing objects such as photos, videos, log files, backups, and VM and container images.
+
+Minio supports [distributed mode](https://docs.minio.io/docs/distributed-minio-quickstart-guide). In distributed mode, you can pool multiple drives (even on different machines) into a single object storage server.
+
+Introduction
+------------
+
+This chart bootstraps Minio deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Prerequisites
+-------------
+
+- Kubernetes 1.4+ with Beta APIs enabled for default standalone mode.
+- Kubernetes 1.5+ with Beta APIs enabled to run Minio in [distributed mode](#distributed-minio).
+- PV provisioner support in the underlying infrastructure.
+
+Installing the Chart
+--------------------
+
+Install this chart using:
+
+```bash
+$ helm install stable/minio
+```
+
+The command deploys Minio on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+### Release name
+
+An instance of a chart running in a Kubernetes cluster is called a release. Each release is identified by a unique name within the cluster. Helm automatically assigns a unique release name after installing the chart. You can also set your preferred name by:
+
+```bash
+$ helm install --name my-release stable/minio
+```
+
+### Access and Secret keys
+
+By default, a pre-generated access key and secret key will be used. To override the default keys, pass the access and secret keys as arguments to `helm install`.
+
+```bash
+$ helm install --set accessKey=myaccesskey,secretKey=mysecretkey \
+ stable/minio
+```
+
+### Updating Minio configuration via Helm
+
+[ConfigMap](https://kubernetes.io/docs/user-guide/configmap/) allows injecting containers with configuration data even while a Helm release is deployed.
+
+To update your Minio server configuration while it is deployed in a release, you need to:
+
+1. Check all the configurable values in the Minio chart using `helm inspect values stable/minio`.
+2. Override the `minio_server_config` settings in a YAML-formatted file, and then pass that file to the upgrade: `helm upgrade -f config.yaml stable/minio`.
+3. Restart the Minio server(s) for the changes to take effect.
+
+You can also check the history of upgrades to a release using `helm history my-release`. Replace `my-release` with the actual release name.
+
+Uninstalling the Chart
+----------------------
+
+Assuming your release is named `my-release`, delete it using the command:
+
+```bash
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+Upgrading the Chart
+-------------------
+
+You can use Helm to update the Minio version in a live release. Assuming your release is named `my-release`, get its values using the command:
+
+```bash
+$ helm get values my-release > old_values.yaml
+```
+
+Then change the `image.tag` field in the `old_values.yaml` file to the Minio image tag you want to use. Now update the chart using
+
+```bash
+$ helm upgrade -f old_values.yaml my-release stable/minio
+```
+
+Default upgrade strategies are specified in the `values.yaml` file. Update these fields if you'd like to use a different strategy.
+
+Configuration
+-------------
+
+The following table lists the configurable parameters of the Minio chart and their default values.
+
+| Parameter | Description | Default |
+|----------------------------|-------------------------------------|---------------------------------------------------------|
+| `image.repository` | Image repository | `minio/minio` |
+| `image.tag` | Minio image tag. Possible values listed [here](https://hub.docker.com/r/minio/minio/tags/).| `RELEASE.2019-02-12T21-58-47Z`|
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `mcImage.repository` | Client image repository | `minio/mc` |
+| `mcImage.tag` | mc image tag. Possible values listed [here](https://hub.docker.com/r/minio/mc/tags/).| `RELEASE.2019-02-13T19-48-27Z`|
+| `mcImage.pullPolicy` | mc Image pull policy | `IfNotPresent` |
+| `ingress.enabled` | Enables Ingress | `false` |
+| `ingress.annotations` | Ingress annotations | `{}` |
+| `ingress.hosts` | Ingress accepted hostnames | `[]` |
+| `ingress.tls` | Ingress TLS configuration | `[]` |
+| `mode` | Minio server mode (`standalone` or `distributed`)| `standalone` |
+| `replicas` | Number of nodes (applicable only for Minio distributed mode). Should be 4 <= x <= 32 | `4` |
+| `existingSecret` | Name of existing secret with access and secret key.| `""` |
+| `accessKey` | Default access key (5 to 20 characters) | `AKIAIOSFODNN7EXAMPLE` |
+| `secretKey` | Default secret key (8 to 40 characters) | `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY` |
+| `configPath` | Default config file location | `~/.minio` |
+| `configPathmc` | Default config file location for minio client - mc | `~/.mc` |
+| `mountPath` | Default mount location for persistent drive| `/export` |
+| `clusterDomain` | domain name of kubernetes cluster where pod is running.| `cluster.local` |
+| `service.type` | Kubernetes service type | `ClusterIP` |
+| `service.port` | Kubernetes port where service is exposed| `9000` |
+| `service.externalIPs` | service external IP addresses | `nil` |
+| `service.annotations` | Service annotations | `{}` |
+| `persistence.enabled` | Use persistent volume to store data | `true` |
+| `persistence.size` | Size of persistent volume claim | `10Gi` |
+| `persistence.existingClaim`| Use an existing PVC to persist data | `nil` |
+| `persistence.storageClass` | Storage class name of PVC | `nil` |
+| `persistence.accessMode` | ReadWriteOnce or ReadOnly | `ReadWriteOnce` |
+| `persistence.subPath` | Mount a sub directory of the persistent volume if set | `""` |
+| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` |
+| `priorityClassName` | Pod priority settings | `""` |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `affinity` | Affinity settings for pod assignment | `{}` |
+| `tolerations` | Toleration labels for pod assignment | `[]` |
+| `podAnnotations` | Pod annotations | `{}` |
+| `tls.enabled` | Enable TLS for Minio server | `false` |
+| `tls.certSecret` | Kubernetes Secret with `public.crt` and `private.key` files. | `""` |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `5` |
+| `livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `livenessProbe.timeoutSeconds` | When the probe times out | `1` |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` |
+| `readinessProbe.periodSeconds` | How often to perform the probe | `15` |
+| `readinessProbe.timeoutSeconds` | When the probe times out | `1` |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3` |
+| `defaultBucket.enabled` | If set to true, a bucket will be created after minio install | `false` |
+| `defaultBucket.name` | Bucket name | `bucket` |
+| `defaultBucket.policy` | Bucket policy | `none` |
+| `defaultBucket.purge` | Purge the bucket if already exists | `false` |
+| `buckets` | List of buckets to create after minio install | `[]` |
+| `s3gateway.enabled` | Use minio as a [s3 gateway](https://github.com/minio/minio/blob/master/docs/gateway/s3.md)| `false` |
+| `s3gateway.replicas` | Number of s3 gateway instances to run in parallel | `4` |
+| `s3gateway.serviceEndpoint`| Endpoint to the S3 compatible service | `""` |
+| `azuregateway.enabled` | Use minio as an [azure gateway](https://docs.minio.io/docs/minio-gateway-for-azure)| `false` |
+| `gcsgateway.enabled` | Use minio as a [Google Cloud Storage gateway](https://docs.minio.io/docs/minio-gateway-for-gcs)| `false` |
+| `gcsgateway.gcsKeyJson` | credential json file of service account key | `""` |
+| `gcsgateway.projectId` | Google cloud project id | `""` |
+| `ossgateway.enabled` | Use minio as an [Alibaba Cloud Object Storage Service gateway](https://github.com/minio/minio/blob/master/docs/gateway/oss.md)| `false` |
+| `ossgateway.replicas` | Number of oss gateway instances to run in parallel | `4` |
+| `ossgateway.endpointURL` | OSS server endpoint. | `""` |
+| `nasgateway.enabled` | Use minio as a [NAS gateway](https://docs.minio.io/docs/minio-gateway-for-nas) | `false` |
+| `nasgateway.replicas` | Number of NAS gateway instances to be run in parallel on a PV | `4` |
+| `environment` | Set Minio server relevant environment variables in `values.yaml` file. Minio containers will be passed these variables when they start. | `MINIO_BROWSER: "on"` |
+
+Some of the parameters above map to the env variables defined in the [Minio DockerHub image](https://hub.docker.com/r/minio/minio/).
+
+You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install --name my-release \
+ --set persistence.size=100Gi \
+ stable/minio
+```
+
+The above command deploys Minio server with a 100Gi backing persistent volume.
+
+Alternately, you can provide a YAML file that specifies parameter values while installing the chart. For example,
+
+```bash
+$ helm install --name my-release -f values.yaml stable/minio
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+Distributed Minio
+-----------
+
+This chart provisions a Minio server in standalone mode, by default. To provision Minio server in [distributed mode](https://docs.minio.io/docs/distributed-minio-quickstart-guide), set the `mode` field to `distributed`,
+
+```bash
+$ helm install --set mode=distributed stable/minio
+```
+
+This provisions Minio server in distributed mode with 4 nodes. To change the number of nodes in your distributed Minio server, set the `replicas` field,
+
+```bash
+$ helm install --set mode=distributed,replicas=8 stable/minio
+```
+
+This provisions the Minio server in distributed mode with 8 nodes. Note that the `replicas` value should be an integer between 4 and 32 (inclusive), consistent with the `replicas` range in the configuration table above.
+
+### StatefulSet [limitations](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/#limitations) applicable to distributed Minio
+
+1. StatefulSets need persistent storage, so the `persistence.enabled` flag is ignored when `mode` is set to `distributed`.
+2. When uninstalling a distributed Minio release, you'll need to manually delete volumes associated with the StatefulSet.
+
+NAS Gateway
+-----------
+
+### Prerequisites
+
+Minio in [NAS gateway mode](https://docs.minio.io/docs/minio-gateway-for-nas) can be used to create multiple Minio instances backed by a single PV in `ReadWriteMany` mode. Currently, only a few [Kubernetes volume plugins](https://kubernetes.io/docs/user-guide/persistent-volumes/#access-modes) support `ReadWriteMany` mode. To deploy the Minio NAS gateway with this Helm chart, you'll need a Persistent Volume backed by one of the supported volume plugins. [This document](https://kubernetes.io/docs/user-guide/volumes/#nfs)
+outlines the steps to create an NFS PV in a Kubernetes cluster.
+
+### Provision NAS Gateway Minio instances
+
+To provision Minio servers in [NAS gateway mode](https://docs.minio.io/docs/minio-gateway-for-nas), set the `nasgateway.enabled` field to `true`,
+
+```bash
+$ helm install --set nasgateway.enabled=true stable/minio
+```
+
+This provisions 4 Minio NAS gateway instances backed by a single storage volume. To change the number of instances in your Minio deployment, set the `nasgateway.replicas` field,
+
+```bash
+$ helm install --set nasgateway.enabled=true,nasgateway.replicas=8 stable/minio
+```
+
+This provisions Minio NAS gateway with 8 instances.
+
+Persistence
+-----------
+
+This chart provisions a PersistentVolumeClaim and mounts the corresponding persistent volume at the default location `/export`. You'll need physical storage available in the Kubernetes cluster for this to work. If you'd rather use `emptyDir`, disable the PersistentVolumeClaim by:
+
+```bash
+$ helm install --set persistence.enabled=false stable/minio
+```
+
+> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."*
+
+Existing PersistentVolumeClaim
+------------------------------
+
+If a Persistent Volume Claim already exists, specify it during installation.
+
+1. Create the PersistentVolume
+2. Create the PersistentVolumeClaim
+3. Install the chart
+
+```bash
+$ helm install --set persistence.existingClaim=PVC_NAME stable/minio
+```
+
+NetworkPolicy
+-------------
+
+To enable network policy for Minio,
+install [a networking plugin that implements the Kubernetes
+NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin),
+and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting
+the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+
+With NetworkPolicy enabled, traffic will be limited to just port 9000.
+
+For more precise policy, set `networkPolicy.allowExternal=true`. This will
+only allow pods with the generated client label to connect to Minio.
+This label will be displayed in the output of a successful install.
+
+Existing secret
+---------------
+
+Instead of having this chart create the secret for you, you can supply a preexisting secret, much
+like an existing PersistentVolumeClaim.
+
+First, create the secret:
+```bash
+$ kubectl create secret generic my-minio-secret --from-literal=accesskey=foobarbaz --from-literal=secretkey=foobarbazqux
+```
+
+Then install the chart, specifying that you want to use an existing secret:
+```bash
+$ helm install --set existingSecret=my-minio-secret stable/minio
+```
+
+The following fields are expected in the secret:
+1. `accesskey` - the access key ID
+2. `secretkey` - the secret key
+3. `gcs_key.json` - The GCS key if you are using the GCS gateway feature. This is optional.
+
+Configure TLS
+-------------
+
+To enable TLS for Minio containers, acquire TLS certificates from a CA or create self-signed certificates. While creating or acquiring certificates, ensure the corresponding domain names are set as per the standard [DNS naming conventions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-identity) of a Kubernetes StatefulSet (for a distributed Minio setup). Then create a secret using
+
+```bash
+$ kubectl create secret generic tls-ssl-minio --from-file=path/to/private.key --from-file=path/to/public.crt
+```
+
+Then install the chart, specifying that you want to use the TLS secret:
+
+```bash
+$ helm install --set tls.enabled=true,tls.certSecret=tls-ssl-minio stable/minio
+```
+
+Pass environment variables to Minio containers
+----------------------------------------------
+
+To pass environment variables to Minio containers when deploying via the Helm chart, use the command-line format below
+
+```bash
+$ helm install --set environment.MINIO_BROWSER=on,environment.MINIO_DOMAIN=domain-name stable/minio
+```
+
+You can add as many environment variables as required, using the above format. Just add `environment.<VARIABLE_NAME>=<value>` under the `--set` flag.
+
+Create buckets after install
+---------------------------
+
+Install the chart, specifying the buckets you want to create after install:
+
+```bash
+$ helm install --set buckets[0].name=bucket1,buckets[0].policy=none,buckets[0].purge=false stable/minio
+```
+
+Description of the configuration parameters used above -
+1. `buckets[].name` - name of the bucket to create, must be a string with length > 0
+2. `buckets[].policy` - Can be one of none|download|upload|public
+3. `buckets[].purge` - Purge if bucket exists already
+
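Equivalently, the same buckets can be declared in a values file instead of on the command line; this is only a sketch of the list form implied by the `--set` flags above, and the second bucket is an assumption.

```yaml
# Illustrative values-file equivalent of the --set flags above:
buckets:
  - name: bucket1
    policy: none
    purge: false
  - name: bucket2          # assumed additional bucket
    policy: download       # one of none|download|upload|public
    purge: false
```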
diff --git a/vnfs/DAaaS/deploy/minio/templates/NOTES.txt b/vnfs/DAaaS/deploy/minio/templates/NOTES.txt
new file mode 100644
index 00000000..b690f502
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/NOTES.txt
@@ -0,0 +1,44 @@
+{{- if eq .Values.service.type "ClusterIP" "NodePort" }}
+Minio can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster:
+{{ template "minio.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+To access Minio from localhost, run the below commands:
+
+ 1. export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+
+ 2. kubectl port-forward $POD_NAME 9000 --namespace {{ .Release.Namespace }}
+
+Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/
+
+You can now access Minio server on http://localhost:9000. Follow the below steps to connect to Minio server with mc client:
+
+ 1. Download the Minio mc client - https://docs.minio.io/docs/minio-client-quickstart-guide
+
+ 2. mc config host add {{ template "minio.fullname" . }}-local http://localhost:9000 {{ .Values.accessKey }} {{ .Values.secretKey }} S3v4
+
+ 3. mc ls {{ template "minio.fullname" . }}-local
+
+Alternately, you can use your browser or the Minio SDK to access the server - https://docs.minio.io/categories/17
+{{- end }}
+{{- if eq .Values.service.type "LoadBalancer" }}
+Minio can be accessed via port {{ .Values.service.port }} on an external IP address. Get the service external IP address by:
+kubectl get svc --namespace {{ .Release.Namespace }} -l app={{ template "minio.fullname" . }}
+
+Note that the public IP may take a couple of minutes to be available.
+
+You can now access Minio server on http://<External-IP>:9000. Follow the below steps to connect to Minio server with mc client:
+
+ 1. Download the Minio mc client - https://docs.minio.io/docs/minio-client-quickstart-guide
+
+ 2. mc config host add {{ template "minio.fullname" . }}-local http://<External-IP>:{{ .Values.service.port }} {{ .Values.accessKey }} {{ .Values.secretKey }} S3v4
+
+ 3. mc ls {{ template "minio.fullname" . }}-local
+
+Alternately, you can use your browser or the Minio SDK to access the server - https://docs.minio.io/categories/17
+{{- end }}
+
+{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
+Note: Since NetworkPolicy is enabled, only pods with label
+{{ template "minio.fullname" . }}-client=true
+will be able to connect to this minio cluster.
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/minio/templates/_helper_create_bucket.txt b/vnfs/DAaaS/deploy/minio/templates/_helper_create_bucket.txt
new file mode 100755
index 00000000..95528793
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/_helper_create_bucket.txt
@@ -0,0 +1,89 @@
+#!/bin/sh
+set -e ; # Have script exit in the event of a failed command.
+
+# connectToMinio
+# Use a check-sleep-check loop to wait for Minio service to be available
+connectToMinio() {
+ SCHEME=$1
+ ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts
+ set -e ; # fail if we can't read the keys.
+ ACCESS=$(cat /config/accesskey) ; SECRET=$(cat /config/secretkey) ;
+ set +e ; # The connections to minio are allowed to fail.
+ echo "Connecting to Minio server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ;
+ MC_COMMAND="mc config host add myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ;
+ $MC_COMMAND ;
+ STATUS=$? ;
+ until [ $STATUS = 0 ]
+ do
+ ATTEMPTS=`expr $ATTEMPTS + 1` ;
+ echo "Failed attempts: $ATTEMPTS" ;
+ if [ $ATTEMPTS -gt $LIMIT ]; then
+ exit 1 ;
+ fi ;
+ sleep 2 ; # 2 second intervals between attempts
+ $MC_COMMAND ;
+ STATUS=$? ;
+ done ;
+ set -e ; # reset `e` as active
+ return 0
+}
+
+# checkBucketExists ($bucket)
+# Check if the bucket exists, by using the exit code of `mc ls`
+checkBucketExists() {
+ BUCKET=$1
+ CMD=$(/usr/bin/mc ls myminio/$BUCKET > /dev/null 2>&1)
+ return $?
+}
+
+# createBucket ($bucket, $policy, $purge)
+# Ensure bucket exists, purging if asked to
+createBucket() {
+ BUCKET=$1
+ POLICY=$2
+ PURGE=$3
+
+ # Purge the bucket, if set & exists
+ # Since PURGE is user input, check explicitly for `true`
+ if [ $PURGE = true ]; then
+ if checkBucketExists $BUCKET ; then
+ echo "Purging bucket '$BUCKET'."
+ set +e ; # don't exit if this fails
+ /usr/bin/mc rm -r --force myminio/$BUCKET
+ set -e ; # reset `e` as active
+ else
+ echo "Bucket '$BUCKET' does not exist, skipping purge."
+ fi
+ fi
+
+ # Create the bucket if it does not exist
+ if ! checkBucketExists $BUCKET ; then
+ echo "Creating bucket '$BUCKET'"
+ /usr/bin/mc mb myminio/$BUCKET
+ else
+ echo "Bucket '$BUCKET' already exists."
+ fi
+
+ # At this point, the bucket should exist, skip checking for existence
+ # Set policy on the bucket
+ echo "Setting policy of bucket '$BUCKET' to '$POLICY'."
+ /usr/bin/mc policy $POLICY myminio/$BUCKET
+}
+
+# Try connecting to Minio instance
+{{- if .Values.tls.enabled }}
+scheme=https
+{{- else }}
+scheme=http
+{{- end }}
+connectToMinio $scheme
+
+{{- if .Values.defaultBucket.enabled }}
+# Create the bucket
+createBucket {{ .Values.defaultBucket.name }} {{ .Values.defaultBucket.policy }} {{ .Values.defaultBucket.purge }}
+{{ else if .Values.buckets }}
+# Create the buckets
+{{- range .Values.buckets }}
+createBucket {{ .name }} {{ .policy }} {{ .purge }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/minio/templates/_helpers.tpl b/vnfs/DAaaS/deploy/minio/templates/_helpers.tpl
new file mode 100644
index 00000000..c8fe9ba7
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/_helpers.tpl
@@ -0,0 +1,43 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "minio.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "minio.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "minio.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for networkpolicy.
+*/}}
+{{- define "minio.networkPolicy.apiVersion" -}}
+{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/minio/templates/configmap.yaml b/vnfs/DAaaS/deploy/minio/templates/configmap.yaml
new file mode 100644
index 00000000..cb11fcd7
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/configmap.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "minio.fullname" . }}
+ labels:
+ app: {{ template "minio.name" . }}
+ chart: {{ template "minio.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+data:
+ initialize: |-
+{{ include (print $.Template.BasePath "/_helper_create_bucket.txt") . | indent 4 }}
diff --git a/vnfs/DAaaS/deploy/minio/templates/deployment.yaml b/vnfs/DAaaS/deploy/minio/templates/deployment.yaml
new file mode 100644
index 00000000..af335b2d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/deployment.yaml
@@ -0,0 +1,195 @@
+{{- if eq .Values.mode "standalone" }}
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+ name: {{ template "minio.fullname" . }}
+ labels:
+ app: {{ template "minio.name" . }}
+ chart: {{ template "minio.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ strategy:
+ type: {{ .Values.DeploymentUpdate.type }}
+ rollingUpdate:
+ maxSurge: {{ .Values.DeploymentUpdate.maxSurge }}
+ maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }}
+ {{- if .Values.nasgateway.enabled }}
+ replicas: {{ .Values.nasgateway.replicas }}
+ {{- end }}
+ {{- if .Values.s3gateway.enabled }}
+ replicas: {{ .Values.s3gateway.replicas }}
+ {{- end }}
+ {{- if .Values.azuregateway.enabled }}
+ replicas: {{ .Values.azuregateway.replicas }}
+ {{- end }}
+ {{- if .Values.gcsgateway.enabled }}
+ replicas: {{ .Values.gcsgateway.replicas }}
+ {{- end }}
+ {{- if .Values.ossgateway.enabled }}
+ replicas: {{ .Values.ossgateway.replicas }}
+ {{- end }}
+ selector:
+ matchLabels:
+ app: {{ template "minio.name" . }}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ name: {{ template "minio.fullname" . }}
+ labels:
+ app: {{ template "minio.name" . }}
+ release: {{ .Release.Name }}
+ {{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.priorityClassName }}
+ priorityClassName: "{{ .Values.priorityClassName }}"
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ {{- if .Values.s3gateway.enabled }}
+ command: [ "/bin/sh",
+ "-ce",
+ "/usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} gateway s3 {{ .Values.s3gateway.serviceEndpoint }}" ]
+ {{- else }}
+ {{- if .Values.azuregateway.enabled }}
+ command: [ "/bin/sh",
+ "-ce",
+ "/usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} gateway azure" ]
+ {{- else }}
+ {{- if .Values.gcsgateway.enabled }}
+ command: [ "/bin/sh",
+ "-ce",
+ "/usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} gateway gcs {{ .Values.gcsgateway.projectId }}" ]
+ {{- else }}
+ {{- if .Values.ossgateway.enabled }}
+ command: [ "/bin/sh",
+ "-ce",
+ "cp /tmp/config.json {{ .Values.configPath }} &&
+ /usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} gateway oss {{ .Values.ossgateway.endpointURL }}" ]
+ {{- else }}
+ {{- if .Values.nasgateway.enabled }}
+ command: [ "/bin/sh",
+ "-ce",
+ "/usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} gateway nas {{ .Values.mountPath }}" ]
+ {{- else }}
+ command: [ "/bin/sh",
+ "-ce",
+ "/usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} server {{ .Values.mountPath }}" ]
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ volumeMounts:
+ {{- if and .Values.persistence.enabled (not .Values.gcsgateway.enabled) (not .Values.azuregateway.enabled) (not .Values.s3gateway.enabled) }}
+ - name: export
+ mountPath: {{ .Values.mountPath }}
+ {{- if .Values.persistence.subPath }}
+ subPath: "{{ .Values.persistence.subPath }}"
+ {{- end }}
+ {{- end }}
+ {{- if .Values.gcsgateway.enabled }}
+ - name: minio-user
+ mountPath: "/etc/credentials"
+ readOnly: true
+ {{- end }}
+ - name: minio-config-dir
+ mountPath: {{ .Values.configPath }}
+ {{- if .Values.tls.enabled }}
+ - name: cert-secret-volume
+ mountPath: {{ .Values.configPath }}certs
+ {{ end }}
+ ports:
+ - name: service
+ containerPort: 9000
+ env:
+ - name: MINIO_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{ else }}{{ template "minio.fullname" . }}{{ end }}
+ key: accesskey
+ - name: MINIO_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{ else }}{{ template "minio.fullname" . }}{{ end }}
+ key: secretkey
+ {{- if .Values.gcsgateway.enabled }}
+ - name: GOOGLE_APPLICATION_CREDENTIALS
+ value: "/etc/credentials/gcs_key.json"
+ {{- end }}
+ {{- range $key, $val := .Values.environment }}
+ - name: {{ $key }}
+ value: {{ $val | quote }}
+ {{- end}}
+ livenessProbe:
+ httpGet:
+ path: /minio/health/live
+ port: service
+ {{- if .Values.tls.enabled }}
+ scheme: HTTPS
+ {{ else }}
+ scheme: HTTP
+ {{- end }}
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+ readinessProbe:
+ httpGet:
+ {{- if .Values.tls.enabled }}
+ scheme: HTTPS
+ {{- end }}
+ path: /minio/health/ready
+ port: service
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+{{- with .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+{{- end }}
+{{- with .Values.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+{{- end }}
+{{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ volumes:
+ {{- if and (not .Values.gcsgateway.enabled) (not .Values.azuregateway.enabled) (not .Values.s3gateway.enabled) }}
+ - name: export
+ {{- if .Values.persistence.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }}
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- end }}
+ - name: minio-user
+ secret:
+ secretName: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{ else }}{{ template "minio.fullname" . }}{{ end }}
+ - name: minio-config-dir
+ emptyDir: {}
+ {{- if .Values.tls.enabled }}
+ - name: cert-secret-volume
+ secret:
+ secretName: {{ .Values.tls.certSecret }}
+ items:
+ - key: {{ .Values.tls.publicCrt }}
+ path: public.crt
+ - key: {{ .Values.tls.privateKey }}
+ path: private.key
+ - key: {{ .Values.tls.publicCrt }}
+ path: CAs/public.crt
+ {{ end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/minio/templates/ingress.yaml b/vnfs/DAaaS/deploy/minio/templates/ingress.yaml
new file mode 100644
index 00000000..5168b861
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/ingress.yaml
@@ -0,0 +1,39 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "minio.fullname" . -}}
+{{- $servicePort := .Values.service.port -}}
+{{- $ingressPath := .Values.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ app: {{ template "minio.name" . }}
+ chart: {{ template "minio.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- with .Values.ingress.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+{{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ . | quote }}
+ http:
+ paths:
+ - path: {{ $ingressPath }}
+ backend:
+ serviceName: {{ $fullName }}
+ servicePort: {{ $servicePort }}
+ {{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/minio/templates/networkpolicy.yaml b/vnfs/DAaaS/deploy/minio/templates/networkpolicy.yaml
new file mode 100644
index 00000000..de57f485
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/networkpolicy.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: {{ template "minio.networkPolicy.apiVersion" . }}
+metadata:
+ name: {{ template "minio.fullname" . }}
+ labels:
+ app: {{ template "minio.name" . }}
+ chart: {{ template "minio.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ podSelector:
+ matchLabels:
+ app: {{ template "minio.name" . }}
+ release: {{ .Release.Name }}
+ ingress:
+ - ports:
+ - port: {{ .Values.service.port }}
+ {{- if not .Values.networkPolicy.allowExternal }}
+ from:
+ - podSelector:
+ matchLabels:
+ {{ template "minio.name" . }}-client: "true"
+ {{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/minio/templates/post-install-create-bucket-job.yaml b/vnfs/DAaaS/deploy/minio/templates/post-install-create-bucket-job.yaml
new file mode 100755
index 00000000..c581338a
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/post-install-create-bucket-job.yaml
@@ -0,0 +1,59 @@
+{{- if or .Values.defaultBucket.enabled .Values.buckets }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ template "minio.fullname" . }}-make-bucket-job
+ labels:
+ app: {{ template "minio.name" . }}
+ chart: {{ template "minio.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ annotations:
+ "helm.sh/hook": post-install,post-upgrade
+ "helm.sh/hook-delete-policy": hook-succeeded
+spec:
+ template:
+ metadata:
+ labels:
+ app: {{ template "minio.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ restartPolicy: OnFailure
+{{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+{{- end }}
+ volumes:
+ - name: minio-configuration
+ projected:
+ sources:
+ - configMap:
+ name: {{ template "minio.fullname" . }}
+ - secret:
+ name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{ else }}{{ template "minio.fullname" . }}{{ end }}
+ {{- if .Values.tls.enabled }}
+ - name: cert-secret-volume-mc
+ secret:
+ secretName: {{ .Values.tls.certSecret }}
+ items:
+ - key: {{ .Values.tls.publicCrt }}
+ path: CAs/public.crt
+ {{ end }}
+ containers:
+ - name: minio-mc
+ image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}"
+ imagePullPolicy: {{ .Values.mcImage.pullPolicy }}
+ command: ["/bin/sh", "/config/initialize"]
+ env:
+ - name: MINIO_ENDPOINT
+ value: {{ template "minio.fullname" . }}
+ - name: MINIO_PORT
+ value: {{ .Values.service.port | quote }}
+ volumeMounts:
+ - name: minio-configuration
+ mountPath: /config
+ {{- if .Values.tls.enabled }}
+ - name: cert-secret-volume-mc
+ mountPath: {{ .Values.configPathmc }}certs
+ {{ end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/minio/templates/pvc.yaml b/vnfs/DAaaS/deploy/minio/templates/pvc.yaml
new file mode 100644
index 00000000..3f4cbb03
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/pvc.yaml
@@ -0,0 +1,27 @@
+{{- if eq .Values.mode "standalone" }}
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ template "minio.fullname" . }}
+ labels:
+ app: {{ template "minio.name" . }}
+ chart: {{ template "minio.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+{{- if and .Values.nasgateway.enabled .Values.nasgateway.pv }}
+ selector:
+ matchLabels:
+ pv: {{ .Values.nasgateway.pv | quote }}
+{{- end }}
+ accessModes:
+ - {{ .Values.persistence.accessMode | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+{{- if .Values.persistence.storageClass }}
+ storageClassName: {{ .Values.persistence.storageClass | quote }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/minio/templates/secrets.yaml b/vnfs/DAaaS/deploy/minio/templates/secrets.yaml
new file mode 100644
index 00000000..b01e0d3c
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/secrets.yaml
@@ -0,0 +1,18 @@
+{{- if not .Values.existingSecret }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "minio.fullname" . }}
+ labels:
+ app: {{ template "minio.name" . }}
+ chart: {{ template "minio.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+type: Opaque
+data:
+ accesskey: {{ .Values.accessKey | b64enc }}
+ secretkey: {{ .Values.secretKey | b64enc }}
+{{- if .Values.gcsgateway.enabled }}
+ gcs_key.json: {{ .Values.gcsgateway.gcsKeyJson | b64enc }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/minio/templates/service.yaml b/vnfs/DAaaS/deploy/minio/templates/service.yaml
new file mode 100644
index 00000000..0799b287
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/service.yaml
@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "minio.fullname" . }}
+ labels:
+ app: {{ template "minio.name" . }}
+ chart: {{ template "minio.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.service.annotations }}
+ annotations:
+{{ toYaml .Values.service.annotations | indent 4 }}
+{{- end }}
+spec:
+{{- if (or (eq .Values.service.type "ClusterIP" "") (empty .Values.service.type)) }}
+ {{- if eq .Values.mode "distributed" }}
+ clusterIP: None
+ {{- else }}
+ type: ClusterIP
+ {{- end }}
+ {{- if not (empty .Values.service.clusterIP) }}
+ clusterIP: {{ .Values.service.clusterIP }}
+ {{end}}
+{{- else if eq .Values.service.type "LoadBalancer" }}
+ type: {{ .Values.service.type }}
+ loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }}
+{{- else }}
+ type: {{ .Values.service.type }}
+{{- end }}
+ ports:
+ - name: service
+ port: 9000
+ targetPort: {{ .Values.service.port }}
+ protocol: TCP
+{{- if (and (eq .Values.service.type "NodePort") ( .Values.service.nodePort)) }}
+ nodePort: {{ .Values.service.nodePort }}
+{{- end}}
+{{- if .Values.service.externalIPs }}
+ externalIPs:
+{{- range $i , $ip := .Values.service.externalIPs }}
+ - {{ $ip }}
+{{- end }}
+{{- end }}
+ selector:
+ app: {{ template "minio.name" . }}
+ release: {{ .Release.Name }}
diff --git a/vnfs/DAaaS/deploy/minio/templates/statefulset.yaml b/vnfs/DAaaS/deploy/minio/templates/statefulset.yaml
new file mode 100644
index 00000000..447b671d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/templates/statefulset.yaml
@@ -0,0 +1,141 @@
+{{- if eq .Values.mode "distributed" }}
+{{ $nodeCount := .Values.replicas | int }}
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+ name: {{ template "minio.fullname" . }}
+ labels:
+ app: {{ template "minio.name" . }}
+ chart: {{ template "minio.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ updateStrategy:
+ type: {{ .Values.StatefulSetUpdate.updateStrategy }}
+ serviceName: {{ template "minio.fullname" . }}
+ replicas: {{ .Values.replicas }}
+ selector:
+ matchLabels:
+ app: {{ template "minio.name" . }}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ name: {{ template "minio.fullname" . }}
+ labels:
+ app: {{ template "minio.name" . }}
+ release: {{ .Release.Name }}
+ {{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.priorityClassName }}
+ priorityClassName: "{{ .Values.priorityClassName }}"
+ {{- end }}
+ containers:
+ - name: {{ .Chart.Name }}
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ {{- if .Values.tls.enabled }}
+ command: [ "/bin/sh",
+ "-ce",
+ "/usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} server
+ {{- range $i := until $nodeCount }}
+ https://{{ template `minio.fullname` $ }}-{{ $i }}.{{ template `minio.fullname` $ }}.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}{{ $.Values.mountPath }}
+ {{- end }}" ]
+ {{ else }}
+ command: [ "/bin/sh",
+ "-ce",
+ "/usr/bin/docker-entrypoint.sh minio -C {{ .Values.configPath }} server
+ {{- range $i := until $nodeCount }}
+ http://{{ template `minio.fullname` $ }}-{{ $i }}.{{ template `minio.fullname` $ }}.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}{{ $.Values.mountPath }}
+ {{- end }}" ]
+ {{ end }}
+ volumeMounts:
+ - name: export
+ mountPath: {{ .Values.mountPath }}
+ {{- if and .Values.persistence.enabled .Values.persistence.subPath }}
+ subPath: "{{ .Values.persistence.subPath }}"
+ {{- end }}
+ - name: minio-config-dir
+ mountPath: {{ .Values.configPath }}
+ {{- if .Values.tls.enabled }}
+ - name: cert-secret-volume
+ mountPath: {{ .Values.configPath }}certs
+ {{ end }}
+ ports:
+ - name: service
+ containerPort: 9000
+ env:
+ - name: MINIO_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{ else }}{{ template "minio.fullname" . }}{{ end }}
+ key: accesskey
+ - name: MINIO_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{ else }}{{ template "minio.fullname" . }}{{ end }}
+ key: secretkey
+ {{- range $key, $val := .Values.environment }}
+ - name: {{ $key }}
+ value: {{ $val | quote }}
+ {{- end}}
+ livenessProbe:
+ httpGet:
+ path: /minio/health/live
+ port: service
+ {{- if .Values.tls.enabled }}
+ scheme: HTTPS
+ {{ else }}
+ scheme: HTTP
+ {{- end }}
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ volumes:
+ - name: minio-user
+ secret:
+ secretName: {{ if .Values.existingSecret }}{{ .Values.existingSecret }}{{ else }}{{ template "minio.fullname" . }}{{ end }}
+ - name: minio-config-dir
+ emptyDir: {}
+ {{- if .Values.tls.enabled }}
+ - name: cert-secret-volume
+ secret:
+ secretName: {{ .Values.tls.certSecret }}
+ items:
+ - key: {{ .Values.tls.publicCrt }}
+ path: public.crt
+ - key: {{ .Values.tls.privateKey }}
+ path: private.key
+ - key: {{ .Values.tls.publicCrt }}
+ path: CAs/public.crt
+ {{ end }}
+ volumeClaimTemplates:
+ - metadata:
+ name: export
+ spec:
+ accessModes: [ {{ .Values.persistence.accessMode | quote }} ]
+ {{- if .Values.persistence.storageClass }}
+ storageClassName: {{ .Values.persistence.storageClass }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/minio/values.yaml b/vnfs/DAaaS/deploy/minio/values.yaml
new file mode 100755
index 00000000..1b81a8cb
--- /dev/null
+++ b/vnfs/DAaaS/deploy/minio/values.yaml
@@ -0,0 +1,331 @@
+## set kubernetes cluster domain where minio is running
+##
+clusterDomain: cluster.local
+
+## Set default image, imageTag, and imagePullPolicy. mode is used to indicate the
+## Minio server mode (standalone or distributed).
+##
+image:
+ repository: minio/minio
+ tag: RELEASE.2019-02-12T21-58-47Z
+ pullPolicy: IfNotPresent
+
+## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio
+## client used to create a default bucket).
+##
+mcImage:
+ repository: minio/mc
+ tag: RELEASE.2019-02-13T19-48-27Z
+ pullPolicy: IfNotPresent
+
+## minio server mode, i.e. standalone or distributed.
+## Distributed Minio ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide
+##
+mode: distributed
+
+## Update strategy for Deployments
+DeploymentUpdate:
+ type: RollingUpdate
+ maxUnavailable: 0
+ maxSurge: 100%
+
+## Update strategy for StatefulSets
+StatefulSetUpdate:
+ updateStrategy: RollingUpdate
+
+## Pod priority settings
+## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+##
+priorityClassName: ""
+
+## Set default accesskey, secretkey, Minio config file path, volume mount path and
+## number of nodes (only used for Minio distributed mode)
+## Distributed Minio ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide
+##
+existingSecret: ""
+accessKey: "onapdaas"
+secretKey: "onapsecretdaas"
+configPath: "/root/.minio/"
+configPathmc: "/root/.mc/"
+mountPath: "/export"
+replicas: 4
+
+## TLS Settings for Minio
+tls:
+ enabled: false
+ ## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret
+ certSecret: ""
+ publicCrt: public.crt
+ privateKey: private.key
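+  ## For example (hypothetical secret name; a minimal sketch), such a secret can be created with:
+  ##   kubectl create secret generic tls-ssl-minio --from-file=./private.key --from-file=./public.crt
+  ## and then referenced here via certSecret: "tls-ssl-minio" with enabled: true.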
+
+## Enable persistence using Persistent Volume Claims
+## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+ enabled: true
+
+ ## A manually managed Persistent Volume and Claim
+ ## Requires persistence.enabled: true
+ ## If defined, PVC must be created manually before volume will be bound
+ # existingClaim:
+
+ ## minio data Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ ## Storage class of PV to bind. By default it looks for standard storage class.
+ ## If the PV uses a different storage class, specify that here.
+ # storageClass: standard
+ accessMode: ReadWriteOnce
+ size: 20Gi
+
+  ## If subPath is set, mount a subfolder of a volume instead of the root of the volume.
+ ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
+ ##
+ subPath: ""
+
+## Expose the Minio service to be accessed from outside the cluster (LoadBalancer service)
+## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
+## ref: http://kubernetes.io/docs/user-guide/services/
+##
+
+service:
+ type: ClusterIP
+ clusterIP: ~
+ port: 9000
+ # nodePort: 31311
+ # externalIPs:
+ # - externalIp1
+ annotations: {}
+ # prometheus.io/scrape: 'true'
+ # prometheus.io/path: '/minio/prometheus/metrics'
+ # prometheus.io/port: '9000'
+
+ingress:
+ enabled: true
+ annotations:
+ kubernetes.io/ingress.class: gloo
+ path: /.*
+ hosts:
+ - minio.modelrepo
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+## Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+tolerations: []
+affinity: {}
+
+# Additional pod annotations
+podAnnotations: {}
+
+## Liveness and Readiness probe values.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+livenessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 30
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 15
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+ requests:
+ memory: 256Mi
+ cpu: 250m
+
+## Create a bucket after minio install
+##
+defaultBucket:
+ enabled: false
+ ## If enabled, must be a string with length > 0
+ name: bucket
+ ## Can be one of none|download|upload|public
+ policy: none
+ ## Purge if bucket exists already
+ purge: false
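+  ## For example (hypothetical bucket name; a minimal sketch), a default bucket can be created at install time with:
+  ##   helm install <path-to-this-chart> --set defaultBucket.enabled=true,defaultBucket.name=models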
+
+## Create multiple buckets after minio install
+## Enabling `defaultBucket` will take priority over this list
+##
+buckets: []
+ # - name: bucket1
+ # policy: none
+ # purge: false
+ # - name: bucket2
+ # policy: none
+ # purge: false
+
+s3gateway:
+ enabled: true
+ replicas: 4
+ serviceEndpoint: ""
+
+## Use minio as an Azure Blob gateway; you should disable data persistence so no volume claims are created.
+## https://docs.minio.io/docs/minio-gateway-for-azure
+azuregateway:
+ enabled: false
+ # Number of parallel instances
+ replicas: 4
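+  ## For example (a minimal sketch), the Azure gateway can be enabled with persistence disabled via:
+  ##   helm install <path-to-this-chart> --set azuregateway.enabled=true,persistence.enabled=false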
+
+## Use minio as a GCS (Google Cloud Storage) gateway; you should disable data persistence so no volume claims are created.
+## https://docs.minio.io/docs/minio-gateway-for-gcs
+
+gcsgateway:
+ enabled: false
+ # Number of parallel instances
+ replicas: 4
+  # credentials JSON file of the service account key
+ gcsKeyJson: ""
+ # Google cloud project-id
+ projectId: ""
+
+ossgateway:
+ enabled: false
+ # Number of parallel instances
+ replicas: 4
+ endpointURL: ""
+
+## Use minio on NAS backend
+## https://docs.minio.io/docs/minio-gateway-for-nas
+
+nasgateway:
+ enabled: false
+ # Number of parallel instances
+ replicas: 4
+  # For the NAS gateway, you may want to bind the PVC to a specific PV. To ensure that happens, the PV to bind to
+  # should have a label like "pv: <value>"; use that value here.
+ pv: ~
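+  ## For example (hypothetical label value): if a PV was pre-created with the label "pv: minio-nas-pv",
+  ## set pv: "minio-nas-pv" here so the PVC selector in pvc.yaml matches that volume.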
+
+## Use this field to add environment variables relevant to the Minio server. These variables will be passed on to the Minio container(s)
+## when the Chart is deployed.
+environment:
+ ## To disable Minio Browser, set this value to off
+ MINIO_BROWSER: "on"
+ ## To enable virtual-host-style requests, set this value to Minio host domain name.
+ # MINIO_DOMAIN: ""
+ ## Minio Cache settings, refer: https://docs.minio.io/docs/minio-disk-cache-guide.html
+ # MINIO_CACHE_DRIVES: ""
+ # MINIO_CACHE_EXCLUDE: ""
+ # MINIO_CACHE_EXPIRY: ""
+ # MINIO_CACHE_MAXUSE: ""
+ ## Minio WORM setting, refer: https://docs.minio.io/docs/minio-server-configuration-guide.html
+ # MINIO_WORM: ""
+ ## Minio KMS settings, refer: https://docs.minio.io/docs/minio-kms-quickstart-guide.html
+ # MINIO_SSE_VAULT_ENDPOINT: ""
+ # MINIO_SSE_VAULT_APPROLE_ID: ""
+ # MINIO_SSE_VAULT_APPROLE_SECRET: ""
+ # MINIO_SSE_VAULT_KEY_NAME: ""
+ ## Minio Federation settings, refer: https://docs.minio.io/docs/minio-federation-quickstart-guide.html
+ # MINIO_ETCD_ENDPOINTS: ""
+ # MINIO_PUBLIC_IPS: ""
+ # MINIO_DOMAIN: ""
+  ## Add other environment variables relevant to the Minio server here. These values will be added to the container(s) when this Chart is deployed.
+
+## https://docs.minio.io/docs/minio-bucket-notification-guide
+## https://github.com/minio/minio/blob/master/docs/config
+minioConfig:
+ region: "us-west-1"
+ browser: "on"
+ domain: ""
+ worm: "off"
+ storageClass:
+ standardStorageClass: ""
+ reducedRedundancyStorageClass: ""
+ cache:
+ drives: []
+ expiry: 90
+ maxuse: 80
+ exclude: []
+ aqmp:
+ enable: false
+ url: ""
+ exchange: ""
+ routingKey: ""
+ exchangeType: ""
+ deliveryMode: 0
+ mandatory: false
+ immediate: false
+ durable: false
+ internal: false
+ noWait: false
+ autoDeleted: false
+ nats:
+ enable: false
+ address: ""
+ subject: ""
+ username: ""
+ password: ""
+ token: ""
+ secure: false
+ pingInterval: 0
+ enableStreaming: false
+ clusterID: ""
+ clientID: ""
+ async: false
+ maxPubAcksInflight: 0
+ elasticsearch:
+ enable: false
+ format: "namespace"
+ url: ""
+ index: ""
+ redis:
+ enable: false
+ format: "namespace"
+ address: ""
+ password: ""
+ key: ""
+ postgresql:
+ enable: false
+ format: "namespace"
+ connectionString: ""
+ table: ""
+ host: ""
+ port: ""
+ user: ""
+ password: ""
+ database: ""
+ kafka:
+ enable: false
+ brokers: "null"
+ topic: ""
+ webhook:
+ enable: false
+ endpoint: ""
+ mysql:
+ enable: false
+ format: "namespace"
+ dsnString: ""
+ table: ""
+ host: ""
+ port: ""
+ user: ""
+ password: ""
+ database: ""
+ mqtt:
+ enable: false
+ broker: ""
+ topic: ""
+ qos: 0
+ clientId: ""
+ username: ""
+ password: ""
+ reconnectInterval: 0
+ keepAliveInterval: 0
+networkPolicy:
+ enabled: false
+ allowExternal: true
diff --git a/vnfs/DAaaS/deploy/operator/.helmignore b/vnfs/DAaaS/deploy/operator/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/deploy/operator/Chart.yaml b/vnfs/DAaaS/deploy/operator/Chart.yaml
new file mode 100644
index 00000000..01c1eb03
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A collection of operator Helm charts.
+name: operator
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/.helmignore b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/Chart.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/Chart.yaml
new file mode 100755
index 00000000..bdaea5ae
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+description: CoreOS etcd-operator Helm chart for Kubernetes
+name: etcd-operator
+version: 0.8.3
+appVersion: 0.9.3
+home: https://github.com/coreos/etcd-operator
+icon: https://raw.githubusercontent.com/coreos/etcd/master/logos/etcd-horizontal-color.png
+sources:
+- https://github.com/coreos/etcd-operator
+maintainers:
+- name: lachie83
+ email: lachlan@deis.com
+- name: alejandroEsc
+ email: jaescobar.cell@gmail.com
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/OWNERS b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/OWNERS
new file mode 100644
index 00000000..1385151c
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/OWNERS
@@ -0,0 +1,6 @@
+approvers:
+- lachie83
+- alejandroEsc
+reviewers:
+- lachie83
+- alejandroEsc
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/README.md b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/README.md
new file mode 100644
index 00000000..e8fa9c51
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/README.md
@@ -0,0 +1,158 @@
+# CoreOS etcd-operator
+
+The [etcd-operator](https://coreos.com/blog/introducing-the-etcd-operator.html) simplifies etcd cluster
+configuration and management.
+
+__DISCLAIMER:__ While this chart has been well-tested, the etcd-operator is still in beta.
+Current project status is available [here](https://github.com/coreos/etcd-operator).
+
+## Introduction
+
+This chart bootstraps an etcd-operator and allows the deployment of etcd-cluster(s).
+
+## Official Documentation
+
+The official project documentation can be found [here](https://github.com/coreos/etcd-operator).
+
+## Prerequisites
+
+- Kubernetes 1.4+ with Beta APIs enabled
+- __Suggested:__ PV provisioner support in the underlying infrastructure to support backups
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+$ helm install stable/etcd-operator --name my-release
+```
+
+__Note__: If you set `cluster.enabled` on install, it will have no effect.
+Before you create an etcd cluster, the TPR must be installed by the operator, so this option is ignored during helm installs, but can be used in upgrades.
+
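+For example (a sketch; the release name is hypothetical and the parameter is taken from the
+configuration table below), an existing release can be upgraded to create the etcd cluster resource:
+
+```bash
+$ helm upgrade my-release stable/etcd-operator --set customResources.createEtcdClusterCRD=true
+```
+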
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```bash
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components EXCEPT the persistent volume.
+
+## Updating
+Updating the TPR resource will not result in the cluster being updated until `kubectl apply` for
+TPRs is fixed; see [kubernetes/issues/29542](https://github.com/kubernetes/kubernetes/issues/29542).
+Workaround options are documented [here](https://github.com/coreos/etcd-operator#resize-an-etcd-cluster).
+
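+As a rough sketch of one possible workaround (the resource name and namespace below are assumptions; follow the
+linked document for the supported procedure), the cluster size can be changed by editing the etcd cluster
+resource in place:
+
+```bash
+$ kubectl edit etcdcluster etcd-cluster --namespace default
+# then set spec.size to the desired number of members
+```
+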
+## Configuration
+
+The following table lists the configurable parameters of the etcd-operator chart and their default values.
+
+| Parameter | Description | Default |
+| ------------------------------------------------- | -------------------------------------------------------------------- | ---------------------------------------------- |
+| `rbac.create` | Install required RBAC service account, roles and rolebindings | `true` |
+| `rbac.apiVersion` | RBAC api version `v1alpha1\|v1beta1` | `v1beta1` |
+| `rbac.etcdOperatorServiceAccountName` | Name of the service account resource when RBAC is enabled | `etcd-operator-sa` |
+| `rbac.backupOperatorServiceAccountName` | Name of the service account resource when RBAC is enabled | `etcd-backup-operator-sa` |
+| `rbac.restoreOperatorServiceAccountName` | Name of the service account resource when RBAC is enabled | `etcd-restore-operator-sa` |
+| `deployments.etcdOperator` | Deploy the etcd cluster operator | `true` |
+| `deployments.backupOperator` | Deploy the etcd backup operator | `true` |
+| `deployments.restoreOperator` | Deploy the etcd restore operator | `true` |
+| `customResources.createEtcdClusterCRD` | Create a custom resource: EtcdCluster | `false` |
+| `customResources.createBackupCRD`                  | Create a custom resource: EtcdBackup                                  | `false`                                         |
+| `customResources.createRestoreCRD`                 | Create a custom resource: EtcdRestore                                 | `false`                                         |
+| `etcdOperator.name` | Etcd Operator name | `etcd-operator` |
+| `etcdOperator.replicaCount` | Number of operator replicas to create (only 1 is supported) | `1` |
+| `etcdOperator.image.repository` | etcd-operator container image | `quay.io/coreos/etcd-operator` |
+| `etcdOperator.image.tag` | etcd-operator container image tag | `v0.9.3` |
+| `etcdOperator.image.pullpolicy` | etcd-operator container image pull policy | `Always` |
+| `etcdOperator.resources.cpu` | CPU limit per etcd-operator pod | `100m` |
+| `etcdOperator.resources.memory` | Memory limit per etcd-operator pod | `128Mi` |
+| `etcdOperator.nodeSelector` | Node labels for etcd operator pod assignment | `{}` |
+| `etcdOperator.commandArgs` | Additional command arguments | `{}` |
+| `backupOperator.name` | Backup operator name | `etcd-backup-operator` |
+| `backupOperator.replicaCount` | Number of operator replicas to create (only 1 is supported) | `1` |
+| `backupOperator.image.repository` | Operator container image | `quay.io/coreos/etcd-operator` |
+| `backupOperator.image.tag` | Operator container image tag | `v0.9.3` |
+| `backupOperator.image.pullpolicy` | Operator container image pull policy | `Always` |
+| `backupOperator.resources.cpu` | CPU limit per etcd-operator pod | `100m` |
+| `backupOperator.resources.memory` | Memory limit per etcd-operator pod | `128Mi` |
+| `backupOperator.spec.storageType` | Storage to use for backup file, currently only S3 supported | `S3` |
+| `backupOperator.spec.s3.s3Bucket` | Bucket in S3 to store backup file | |
+| `backupOperator.spec.s3.awsSecret` | Name of kubernetes secret containing aws credentials | |
+| `backupOperator.nodeSelector` | Node labels for etcd operator pod assignment | `{}` |
+| `backupOperator.commandArgs` | Additional command arguments | `{}` |
+| `restoreOperator.name`                             | Restore operator name                                                 | `etcd-restore-operator`                         |
+| `restoreOperator.replicaCount` | Number of operator replicas to create (only 1 is supported) | `1` |
+| `restoreOperator.image.repository` | Operator container image | `quay.io/coreos/etcd-operator` |
+| `restoreOperator.image.tag` | Operator container image tag | `v0.9.3` |
+| `restoreOperator.image.pullpolicy` | Operator container image pull policy | `Always` |
+| `restoreOperator.resources.cpu` | CPU limit per etcd-operator pod | `100m` |
+| `restoreOperator.resources.memory` | Memory limit per etcd-operator pod | `128Mi` |
+| `restoreOperator.spec.s3.path` | Path in S3 bucket containing the backup file | |
+| `restoreOperator.spec.s3.awsSecret` | Name of kubernetes secret containing aws credentials | |
+| `restoreOperator.nodeSelector` | Node labels for etcd operator pod assignment | `{}` |
+| `restoreOperator.commandArgs` | Additional command arguments | `{}` |
+| `etcdCluster.name` | etcd cluster name | `etcd-cluster` |
+| `etcdCluster.size` | etcd cluster size | `3` |
+| `etcdCluster.version` | etcd cluster version | `3.2.25` |
+| `etcdCluster.image.repository` | etcd container image | `quay.io/coreos/etcd-operator` |
+| `etcdCluster.image.tag` | etcd container image tag | `v3.2.25` |
+| `etcdCluster.image.pullPolicy` | etcd container image pull policy | `Always` |
+| `etcdCluster.enableTLS` | Enable use of TLS | `false` |
+| `etcdCluster.tls.static.member.peerSecret` | Kubernetes secret containing TLS peer certs | `etcd-peer-tls` |
+| `etcdCluster.tls.static.member.serverSecret` | Kubernetes secret containing TLS server certs | `etcd-server-tls` |
+| `etcdCluster.tls.static.operatorSecret` | Kubernetes secret containing TLS client certs | `etcd-client-tls` |
+| `etcdCluster.pod.antiAffinity` | Whether etcd cluster pods should have an antiAffinity | `false` |
+| `etcdCluster.pod.resources.limits.cpu` | CPU limit per etcd cluster pod | `100m` |
+| `etcdCluster.pod.resources.limits.memory` | Memory limit per etcd cluster pod | `128Mi` |
+| `etcdCluster.pod.resources.requests.cpu` | CPU request per etcd cluster pod | `100m` |
+| `etcdCluster.pod.resources.requests.memory` | Memory request per etcd cluster pod | `128Mi` |
+| `etcdCluster.pod.nodeSelector` | Node labels for etcd cluster pod assignment | `{}` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example:
+
+```bash
+$ helm install --name my-release --set image.tag=v0.2.1 stable/etcd-operator
+```
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while
+installing the chart. For example:
+
+```bash
+$ helm install --name my-release --values values.yaml stable/etcd-operator
+```
+
+## RBAC
+By default the chart will install the recommended RBAC roles and rolebindings.
+
+To determine if your cluster supports this, run the following:
+
+```console
+$ kubectl api-versions | grep rbac
+```
+
+You also need to have the following flag set on the API server. See the following document for how to enable [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/):
+
+```
+--authorization-mode=RBAC
+```
+
+If the output contains "beta", or both "alpha" and "beta", you may install RBAC resources by default; if not, you may turn RBAC off as described below.
+
+### RBAC role/rolebinding creation
+
+RBAC resources are enabled by default. To disable RBAC do the following:
+
+```console
+$ helm install --name my-release stable/etcd-operator --set rbac.create=false
+```
+
+### Changing RBAC manifest apiVersion
+
+By default the RBAC resources are generated with the "v1beta1" apiVersion. To use "v1alpha1" do the following:
+
+```console
+$ helm install --name my-release stable/etcd-operator --set rbac.install=true,rbac.apiVersion=v1alpha1
+```
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/NOTES.txt b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/NOTES.txt
new file mode 100644
index 00000000..c33ee014
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/NOTES.txt
@@ -0,0 +1,33 @@
+{{- $clusterEnabled := (and (not .Release.IsInstall) .Values.customResources.createEtcdClusterCRD) -}}
+{{- if and .Release.IsInstall .Values.customResources.createEtcdClusterCRD -}}
+Not enabling the cluster: the ThirdPartyResource must be installed before you can create a cluster. Continuing with the rest of the normal deployment.
+
+{{ end -}}
+
+{{- if $clusterEnabled -}}
+1. Watch etcd cluster start
+ kubectl get pods -l etcd_cluster={{ .Values.etcdCluster.name }} --namespace {{ .Release.Namespace }} -w
+
+2. Confirm etcd cluster is healthy
+ $ kubectl run --rm -i --tty --env="ETCDCTL_API=3" --env="ETCDCTL_ENDPOINTS=http://{{ .Values.etcdCluster.name }}-client:2379" --namespace {{ .Release.Namespace }} etcd-test --image quay.io/coreos/etcd --restart=Never -- /bin/sh -c 'watch -n1 "etcdctl member list"'
+
+3. Interact with the cluster!
+ $ kubectl run --rm -i --tty --env ETCDCTL_API=3 --namespace {{ .Release.Namespace }} etcd-test --image quay.io/coreos/etcd --restart=Never -- /bin/sh
+ / # etcdctl --endpoints http://{{ .Values.etcdCluster.name }}-client:2379 put foo bar
+ / # etcdctl --endpoints http://{{ .Values.etcdCluster.name }}-client:2379 get foo
+ OK
+ (ctrl-D to exit)
+
+4. Optional
+ Check the etcd-operator logs
+ export POD=$(kubectl get pods -l app={{ template "etcd-operator.fullname" . }} --namespace {{ .Release.Namespace }} --output name)
+ kubectl logs $POD --namespace={{ .Release.Namespace }}
+
+{{- else -}}
+1. etcd-operator deployed.
+   If you would like to deploy an etcd-cluster, set cluster.enabled to true in values.yaml
+ Check the etcd-operator logs
+ export POD=$(kubectl get pods -l app={{ template "etcd-operator.fullname" . }} --namespace {{ .Release.Namespace }} --output name)
+ kubectl logs $POD --namespace={{ .Release.Namespace }}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/_helpers.tpl b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/_helpers.tpl
new file mode 100644
index 00000000..03f9a26b
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/_helpers.tpl
@@ -0,0 +1,75 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "etcd-operator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "etcd-operator.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.etcdOperator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "etcd-backup-operator.name" -}}
+{{- default .Chart.Name .Values.backupOperator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "etcd-backup-operator.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.backupOperator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "etcd-restore-operator.name" -}}
+{{- default .Chart.Name .Values.restoreOperator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "etcd-restore-operator.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.restoreOperator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the etcd-operator service account to use
+*/}}
+{{- define "etcd-operator.serviceAccountName" -}}
+{{- if .Values.serviceAccount.etcdOperatorServiceAccount.create -}}
+ {{ default (include "etcd-operator.fullname" .) .Values.serviceAccount.etcdOperatorServiceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.etcdOperatorServiceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the backup-operator service account to use
+*/}}
+{{- define "etcd-backup-operator.serviceAccountName" -}}
+{{- if .Values.serviceAccount.backupOperatorServiceAccount.create -}}
+ {{ default (include "etcd-backup-operator.fullname" .) .Values.serviceAccount.backupOperatorServiceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.backupOperatorServiceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the restore-operator service account to use
+*/}}
+{{- define "etcd-restore-operator.serviceAccountName" -}}
+{{- if .Values.serviceAccount.restoreOperatorServiceAccount.create -}}
+ {{ default (include "etcd-restore-operator.fullname" .) .Values.serviceAccount.restoreOperatorServiceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.restoreOperatorServiceAccount.name }}
+{{- end -}}
+{{- end -}} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-etcd-crd.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-etcd-crd.yaml
new file mode 100644
index 00000000..5528f766
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-etcd-crd.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.customResources.createBackupCRD }}
+---
+apiVersion: "etcd.database.coreos.com/v1beta2"
+kind: "EtcdBackup"
+metadata:
+ name: {{ template "etcd-backup-operator.fullname" . }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-backup-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ annotations:
+ "helm.sh/hook": "post-install"
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ clusterName: {{ .Values.etcdCluster.name }}
+{{ toYaml .Values.backupOperator.spec | indent 2 }}
+{{- end}} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-clusterrole-binding.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-clusterrole-binding.yaml
new file mode 100644
index 00000000..526b2454
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-clusterrole-binding.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.rbac.create .Values.deployments.backupOperator }}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }}
+metadata:
+ name: {{ template "etcd-backup-operator.fullname" . }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+subjects:
+- kind: ServiceAccount
+ name: {{ template "etcd-backup-operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "etcd-operator.fullname" . }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-deployment.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-deployment.yaml
new file mode 100644
index 00000000..d5c421c1
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-deployment.yaml
@@ -0,0 +1,59 @@
+{{- if .Values.deployments.backupOperator }}
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+ name: {{ template "etcd-backup-operator.fullname" . }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-backup-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "etcd-backup-operator.fullname" . }}
+ release: {{ .Release.Name }}
+ replicas: {{ .Values.backupOperator.replicaCount }}
+ template:
+ metadata:
+ name: {{ template "etcd-backup-operator.fullname" . }}
+ labels:
+ app: {{ template "etcd-backup-operator.fullname" . }}
+ release: {{ .Release.Name }}
+ spec:
+ serviceAccountName: {{ template "etcd-backup-operator.serviceAccountName" . }}
+ containers:
+ - name: {{ .Values.backupOperator.name }}
+ image: "{{ .Values.backupOperator.image.repository }}:{{ .Values.backupOperator.image.tag }}"
+ imagePullPolicy: {{ .Values.backupOperator.image.pullPolicy }}
+ command:
+ - etcd-backup-operator
+{{- range $key, $value := .Values.backupOperator.commandArgs }}
+ - "--{{ $key }}={{ $value }}"
+{{- end }}
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ resources:
+ limits:
+ cpu: {{ .Values.backupOperator.resources.cpu }}
+ memory: {{ .Values.backupOperator.resources.memory }}
+ requests:
+ cpu: {{ .Values.backupOperator.resources.cpu }}
+ memory: {{ .Values.backupOperator.resources.memory }}
+ {{- if .Values.backupOperator.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.backupOperator.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.backupOperator.tolerations }}
+ tolerations:
+{{ toYaml .Values.backupOperator.tolerations | indent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-service-account.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-service-account.yaml
new file mode 100644
index 00000000..06aec3df
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/backup-operator-service-account.yaml
@@ -0,0 +1,12 @@
+{{- if and .Values.serviceAccount.backupOperatorServiceAccount.create .Values.deployments.backupOperator }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "etcd-backup-operator.serviceAccountName" . }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-backup-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/etcd-cluster-crd.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/etcd-cluster-crd.yaml
new file mode 100644
index 00000000..0d385d8f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/etcd-cluster-crd.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.customResources.createEtcdClusterCRD }}
+---
+apiVersion: "etcd.database.coreos.com/v1beta2"
+kind: "EtcdCluster"
+metadata:
+ name: {{ .Values.etcdCluster.name }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ annotations:
+ "helm.sh/hook": "post-install"
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ size: {{ .Values.etcdCluster.size }}
+ version: "{{ .Values.etcdCluster.version }}"
+ pod:
+{{ toYaml .Values.etcdCluster.pod | indent 4 }}
+ {{- if .Values.etcdCluster.enableTLS }}
+ TLS:
+{{ toYaml .Values.etcdCluster.tls | indent 4 }}
+ {{- end }}
+{{- end }}
+
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-cluster-role.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-cluster-role.yaml
new file mode 100644
index 00000000..62085978
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-cluster-role.yaml
@@ -0,0 +1,49 @@
+{{- if .Values.rbac.create }}
+---
+apiVersion: rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }}
+kind: ClusterRole
+metadata:
+ name: {{ template "etcd-operator.fullname" . }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+rules:
+- apiGroups:
+ - etcd.database.coreos.com
+ resources:
+ - etcdclusters
+ - etcdbackups
+ - etcdrestores
+ verbs:
+ - "*"
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - "*"
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ - endpoints
+ - persistentvolumeclaims
+ - events
+ verbs:
+ - "*"
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - "*"
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-clusterrole-binding.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-clusterrole-binding.yaml
new file mode 100644
index 00000000..09594ccc
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-clusterrole-binding.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.rbac.create .Values.deployments.etcdOperator }}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/{{ required "A valid .Values.rbac.apiVersion entry required!" .Values.rbac.apiVersion }}
+metadata:
+ name: {{ template "etcd-operator.fullname" . }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+subjects:
+- kind: ServiceAccount
+ name: {{ template "etcd-operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "etcd-operator.fullname" . }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-deployment.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-deployment.yaml
new file mode 100644
index 00000000..bb6b1a75
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-deployment.yaml
@@ -0,0 +1,81 @@
+{{- if .Values.deployments.etcdOperator }}
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+ name: {{ template "etcd-operator.fullname" . }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "etcd-operator.fullname" . }}
+ release: {{ .Release.Name }}
+ replicas: {{ .Values.etcdOperator.replicaCount }}
+ template:
+ metadata:
+ name: {{ template "etcd-operator.fullname" . }}
+ labels:
+ app: {{ template "etcd-operator.fullname" . }}
+ release: {{ .Release.Name }}
+ spec:
+ serviceAccountName: {{ template "etcd-operator.serviceAccountName" . }}
+ containers:
+ - name: {{ template "etcd-operator.fullname" . }}
+ image: "{{ .Values.etcdOperator.image.repository }}:{{ .Values.etcdOperator.image.tag }}"
+ imagePullPolicy: {{ .Values.etcdOperator.image.pullPolicy }}
+ command:
+ - etcd-operator
+{{- range $key, $value := .Values.etcdOperator.commandArgs }}
+ - "--{{ $key }}={{ $value }}"
+{{- end }}
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ resources:
+ limits:
+ cpu: {{ .Values.etcdOperator.resources.cpu }}
+ memory: {{ .Values.etcdOperator.resources.memory }}
+ requests:
+ cpu: {{ .Values.etcdOperator.resources.cpu }}
+ memory: {{ .Values.etcdOperator.resources.memory }}
+ {{- if .Values.etcdOperator.livenessProbe.enabled }}
+ livenessProbe:
+ httpGet:
+ path: /readyz
+ port: 8080
+ initialDelaySeconds: {{ .Values.etcdOperator.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.etcdOperator.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.etcdOperator.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.etcdOperator.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.etcdOperator.livenessProbe.failureThreshold }}
+ {{- end}}
+ {{- if .Values.etcdOperator.readinessProbe.enabled }}
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 8080
+ initialDelaySeconds: {{ .Values.etcdOperator.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.etcdOperator.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.etcdOperator.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.etcdOperator.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.etcdOperator.readinessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.etcdOperator.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.etcdOperator.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.etcdOperator.tolerations }}
+ tolerations:
+{{ toYaml .Values.etcdOperator.tolerations | indent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-service-account.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-service-account.yaml
new file mode 100644
index 00000000..2faba8af
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/operator-service-account.yaml
@@ -0,0 +1,12 @@
+{{- if and .Values.serviceAccount.etcdOperatorServiceAccount.create .Values.deployments.etcdOperator }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "etcd-operator.serviceAccountName" . }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-etcd-crd.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-etcd-crd.yaml
new file mode 100644
index 00000000..73faaab8
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-etcd-crd.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.customResources.createRestoreCRD }}
+---
+apiVersion: "etcd.database.coreos.com/v1beta2"
+kind: "EtcdRestore"
+metadata:
+ # An EtcdCluster with the same name will be created
+ name: {{ .Values.etcdCluster.name }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-restore-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ annotations:
+ "helm.sh/hook": "post-install"
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ clusterSpec:
+ size: {{ .Values.etcdCluster.size }}
+ baseImage: "{{ .Values.etcdCluster.image.repository }}"
+ version: {{ .Values.etcdCluster.image.tag }}
+ pod:
+{{ toYaml .Values.etcdCluster.pod | indent 6 }}
+ {{- if .Values.etcdCluster.enableTLS }}
+ TLS:
+{{ toYaml .Values.etcdCluster.tls | indent 6 }}
+ {{- end }}
+{{ toYaml .Values.restoreOperator.spec | indent 2 }}
+{{- end}} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-clusterrole-binding.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-clusterrole-binding.yaml
new file mode 100644
index 00000000..9a6696ef
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-clusterrole-binding.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.rbac.create .Values.deployments.restoreOperator }}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }}
+metadata:
+ name: {{ template "etcd-restore-operator.fullname" . }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-restore-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+subjects:
+- kind: ServiceAccount
+ name: {{ template "etcd-restore-operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "etcd-operator.fullname" . }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-deployment.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-deployment.yaml
new file mode 100644
index 00000000..5c4784de
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-deployment.yaml
@@ -0,0 +1,63 @@
+{{- if .Values.deployments.restoreOperator }}
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+ name: {{ template "etcd-restore-operator.fullname" . }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-restore-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "etcd-restore-operator.name" . }}
+ release: {{ .Release.Name }}
+ replicas: {{ .Values.restoreOperator.replicaCount }}
+ template:
+ metadata:
+ name: {{ template "etcd-restore-operator.fullname" . }}
+ labels:
+ app: {{ template "etcd-restore-operator.name" . }}
+ release: {{ .Release.Name }}
+ spec:
+ serviceAccountName: {{ template "etcd-restore-operator.serviceAccountName" . }}
+ containers:
+ - name: {{ .Values.restoreOperator.name }}
+ image: "{{ .Values.restoreOperator.image.repository }}:{{ .Values.restoreOperator.image.tag }}"
+ imagePullPolicy: {{ .Values.restoreOperator.image.pullPolicy }}
+ ports:
+ - containerPort: {{ .Values.restoreOperator.port }}
+ command:
+ - etcd-restore-operator
+{{- range $key, $value := .Values.restoreOperator.commandArgs }}
+ - "--{{ $key }}={{ $value }}"
+{{- end }}
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: SERVICE_ADDR
+ value: "{{ .Values.restoreOperator.name }}:{{ .Values.restoreOperator.port }}"
+ resources:
+ limits:
+ cpu: {{ .Values.restoreOperator.resources.cpu }}
+ memory: {{ .Values.restoreOperator.resources.memory }}
+ requests:
+ cpu: {{ .Values.restoreOperator.resources.cpu }}
+ memory: {{ .Values.restoreOperator.resources.memory }}
+ {{- if .Values.restoreOperator.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.restoreOperator.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.restoreOperator.tolerations }}
+ tolerations:
+{{ toYaml .Values.restoreOperator.tolerations | indent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-service-account.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-service-account.yaml
new file mode 100644
index 00000000..595cee92
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-service-account.yaml
@@ -0,0 +1,12 @@
+{{- if and .Values.serviceAccount.restoreOperatorServiceAccount.create .Values.deployments.restoreOperator }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "etcd-restore-operator.serviceAccountName" . }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-restore-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-service.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-service.yaml
new file mode 100644
index 00000000..052be364
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/templates/restore-operator-service.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.deployments.restoreOperator }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Values.restoreOperator.name }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "etcd-restore-operator.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ ports:
+ - protocol: TCP
+ name: http-etcd-restore-port
+ port: {{ .Values.restoreOperator.port }}
+ selector:
+ app: {{ template "etcd-restore-operator.name" . }}
+ release: {{ .Release.Name }}
+{{- end }}
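With the chart defaults (`restoreOperator.name: etcd-restore-operator`, `restoreOperator.port: 19999`), this Service and the `SERVICE_ADDR` environment variable in the restore-operator deployment render consistently, roughly as sketched below (labels and selector omitted for brevity):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: etcd-restore-operator
spec:
  ports:
  - protocol: TCP
    name: http-etcd-restore-port
    port: 19999
# ...while the restore-operator container gets:
#   env:
#   - name: SERVICE_ADDR
#     value: "etcd-restore-operator:19999"
```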
diff --git a/vnfs/DAaaS/deploy/operator/charts/etcd-operator/values.yaml b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/values.yaml
new file mode 100644
index 00000000..1260e077
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/etcd-operator/values.yaml
@@ -0,0 +1,153 @@
+# Default values for etcd-operator.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+## Install Default RBAC roles and bindings
+rbac:
+ create: true
+ apiVersion: v1beta1
+
+## Service account names and whether to create them
+serviceAccount:
+ etcdOperatorServiceAccount:
+ create: true
+ name:
+ backupOperatorServiceAccount:
+ create: true
+ name:
+ restoreOperatorServiceAccount:
+ create: true
+ name:
+
+# Select what to deploy
+deployments:
+ etcdOperator: true
+ # one time deployment, delete once completed,
+ # Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/backup-operator.md
+ backupOperator: true
+ # one time deployment, delete once completed
+ # Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/restore-operator.md
+ restoreOperator: true
+
+# creates custom resources, not all required,
+# you could use `helm template --values <values.yaml> --name release_name ... `
+# and create the resources yourself to deploy on your cluster later
+customResources:
+ createEtcdClusterCRD: false
+ createBackupCRD: false
+ createRestoreCRD: false
+
+# etcdOperator
+etcdOperator:
+ name: etcd-operator
+ replicaCount: 1
+ image:
+ repository: quay.io/coreos/etcd-operator
+ tag: v0.9.3
+ pullPolicy: Always
+ resources:
+ cpu: 100m
+ memory: 128Mi
+ ## Node labels for etcd-operator pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ nodeSelector: {}
+ ## additional command arguments go here; will be translated to `--key=value` form
+ ## e.g., analytics: true
+ commandArgs:
+ cluster-wide: true
+ ## Configurable health checks against the /readyz endpoint that etcd-operator exposes
+ readinessProbe:
+ enabled: false
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ enabled: false
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
+ successThreshold: 1
+ failureThreshold: 3
+# backup spec
+backupOperator:
+ name: etcd-backup-operator
+ replicaCount: 1
+ image:
+ repository: quay.io/coreos/etcd-operator
+ tag: v0.9.3
+ pullPolicy: Always
+ resources:
+ cpu: 100m
+ memory: 128Mi
+ spec:
+ storageType: S3
+ s3:
+ s3Bucket:
+ awsSecret:
+ ## Node labels for etcd pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ nodeSelector: {}
+ ## additional command arguments go here; will be translated to `--key=value` form
+ ## e.g., analytics: true
+ commandArgs: {}
+
+# restore spec
+restoreOperator:
+ name: etcd-restore-operator
+ replicaCount: 1
+ image:
+ repository: quay.io/coreos/etcd-operator
+ tag: v0.9.3
+ pullPolicy: Always
+ port: 19999
+ resources:
+ cpu: 100m
+ memory: 128Mi
+ spec:
+ s3:
+ # The format of "path" must be: "<s3-bucket-name>/<path-to-backup-file>"
+ # e.g: "etcd-snapshot-bucket/v1/default/example-etcd-cluster/3.2.10_0000000000000001_etcd.backup"
+ path:
+ awsSecret:
+ ## Node labels for etcd pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ nodeSelector: {}
+ ## additional command arguments go here; will be translated to `--key=value` form
+ ## e.g., analytics: true
+ commandArgs: {}
+
+## etcd-cluster specific values
+etcdCluster:
+ name: etcd
+ size: 3
+ version: 3.2.25
+ image:
+ repository: quay.io/coreos/etcd
+ tag: v3.2.25
+ pullPolicy: Always
+ enableTLS: false
+ # TLS configs
+ tls:
+ static:
+ member:
+ peerSecret: etcd-peer-tls
+ serverSecret: etcd-server-tls
+ operatorSecret: etcd-client-tls
+ ## etcd cluster pod specific values
+ ## Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-members-cluster-with-resource-requirement
+ pod:
+ ## Antiaffinity for etcd pod assignment
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ antiAffinity: false
+ resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ ## Node labels for etcd pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ nodeSelector: {}
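To point the restore operator at an existing backup, the `restoreOperator.spec.s3` keys above can be overridden at install time; the bucket and secret names in this sketch are hypothetical placeholders.

```yaml
# restore-values.yaml -- hypothetical names, shown only to illustrate the override shape
restoreOperator:
  spec:
    s3:
      # "<s3-bucket-name>/<path-to-backup-file>", as noted in the comment above
      path: my-etcd-backups/v1/default/example-etcd-cluster/3.2.10_0000000000000001_etcd.backup
      awsSecret: my-aws-credentials
```

Such a file would typically be passed with `helm install -f restore-values.yaml ...`.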
diff --git a/vnfs/DAaaS/deploy/operator/charts/m3db-operator/Chart.yaml b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/Chart.yaml
new file mode 100644
index 00000000..ebdc0b40
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/Chart.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+name: m3db-operator
+version: 0.1.3
+# TODO(PS) - helm has issues with GKE's SemVer
+# Error: Chart requires kubernetesVersion: >=1.10.6 which is incompatible with Kubernetes v1.10.7-gke.2
+#
+#kubeVersion: ">=1.10.7"
+description: Kubernetes operator for M3DB timeseries database
+keywords:
+ - operator
+ - m3
+home: https://github.com/m3db/m3db-operator
+sources:
+ - https://github.com/m3db/m3db-operator
+maintainers:
+ - name: m3 Authors
+ email: m3db@googlegroups.com
+ url: https://operator.m3db.io/
+engine: gotpl
+icon: https://raw.githubusercontent.com/m3db/m3/master/docs/theme/assets/images/M3-logo.png
+appVersion: ">0.4.7"
+tillerVersion: ">=2.11.0"
diff --git a/vnfs/DAaaS/deploy/operator/charts/m3db-operator/LICENSE b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vnfs/DAaaS/deploy/operator/charts/m3db-operator/NOTES.txt b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/NOTES.txt
new file mode 100644
index 00000000..ca4143db
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/NOTES.txt
@@ -0,0 +1,12 @@
+ ___ _ _
+ / _ \ _ __ ___ _ __ __ _| |_ ___ _ __ | |__ __ _ ___
+| | | | '_ \ / _ \ '__/ _` | __/ _ \| '__| | '_ \ / _` / __|
+| |_| | |_) | __/ | | (_| | || (_) | | | | | | (_| \__ \
+ \___/| .__/ \___|_| \__,_|\__\___/|_| |_| |_|\__,_|___/
+ |_|
+ _ _ _ _ _ _
+| |__ ___ ___ _ __ (_)_ __ ___| |_ __ _| | | ___ __| |
+| '_ \ / _ \/ _ \ '_ \ | | '_ \/ __| __/ _` | | |/ _ \/ _` |
+| |_) | __/ __/ | | | | | | | \__ \ || (_| | | | __/ (_| |
+|_.__/ \___|\___|_| |_| |_|_| |_|___/\__\__,_|_|_|\___|\__,_|
+
diff --git a/vnfs/DAaaS/deploy/operator/charts/m3db-operator/README.md b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/README.md
new file mode 100644
index 00000000..0a532d31
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/README.md
@@ -0,0 +1,14 @@
+### Helm Charts for M3DB clusters on Kubernetes
+
+### Prerequisite
+
+[Install helm](https://docs.helm.sh/using_helm/#installing-helm)
+
+### Installing m3db-operator chart
+
+```
+cd helm/m3db-operator
+helm package .
+helm install m3db-operator-0.1.3.tgz
+```
+
diff --git a/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/cluster_role.yaml b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/cluster_role.yaml
new file mode 100644
index 00000000..7bf41739
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/cluster_role.yaml
@@ -0,0 +1,35 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ .Values.operator.name }}
+rules:
+- apiGroups: ["extensions"]
+ resources: ["deployments", "replicasets", "daemonsets"]
+ verbs: ["create", "get", "update", "delete", "list"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["create", "get", "update", "delete", "list"]
+- apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "create", "delete", "deletecollection"]
+- apiGroups: [""]
+ resources: ["persistentvolumes", "persistentvolumeclaims", "services", "secrets", "configmaps"]
+ verbs: ["create", "get", "update", "delete", "list"]
+- apiGroups: ["batch"]
+ resources: ["cronjobs", "jobs"]
+ verbs: ["create", "get", "deletecollection", "delete"]
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["list", "get", "watch", "update"]
+- apiGroups: ["apps"]
+ resources: ["statefulsets", "deployments"]
+ verbs: ["*"]
+- apiGroups: ["operator.m3db.io"]
+ resources: ["*"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch"]
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch"]
diff --git a/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/cluster_role_binding.yaml b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/cluster_role_binding.yaml
new file mode 100644
index 00000000..876a6705
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/cluster_role_binding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ .Values.operator.name }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ .Values.operator.name }}
+subjects:
+- kind: ServiceAccount
+ name: {{ .Values.operator.name }}
+ namespace: {{ .Release.Namespace }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/service_account.yaml b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/service_account.yaml
new file mode 100644
index 00000000..a65e90bc
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/service_account.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Values.operator.name }}
+ namespace: {{ .Release.Namespace }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/stateful_set.yaml b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/stateful_set.yaml
new file mode 100644
index 00000000..d1002378
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/templates/stateful_set.yaml
@@ -0,0 +1,26 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ .Values.operator.name }}
+ namespace: {{ .Release.Namespace }}
+spec:
+ serviceName: {{ .Values.operator.name }}
+ replicas: 1
+ selector:
+ matchLabels:
+ name: {{ .Values.operator.name }}
+ template:
+ metadata:
+ labels:
+ name: {{ .Values.operator.name }}
+ spec:
+ containers:
+ - name: {{ .Values.operator.name }}
+ image: {{ .Values.image.repository}}:{{ .Values.image.tag }}
+ command:
+ - m3db-operator
+ imagePullPolicy: Always
+ env:
+ - name: ENVIRONMENT
+ value: {{ .Values.environment }}
+ serviceAccount: {{ .Values.operator.name }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/m3db-operator/values.yaml b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/values.yaml
new file mode 100644
index 00000000..8411d77e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/m3db-operator/values.yaml
@@ -0,0 +1,6 @@
+operator:
+ name: m3db-operator
+image:
+ repository: quay.io/m3db/m3db-operator
+ tag: v0.1.3
+environment: production
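The templates above only consume `operator.name`, `image.repository`, `image.tag`, and `environment`, so a values override stays small. A sketch with an alternative environment value:

```yaml
# override file for the m3db-operator chart (only keys used by the templates are shown)
operator:
  name: m3db-operator
image:
  repository: quay.io/m3db/m3db-operator
  tag: v0.1.3          # matches the chart's default tag
environment: staging   # rendered into the ENVIRONMENT env var of the operator container
```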
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/.helmignore b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/.helmignore
new file mode 100644
index 00000000..9797d317
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/.helmignore
@@ -0,0 +1,25 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+# helm/charts
+OWNERS
+hack/
+ci/
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/Chart.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/Chart.yaml
new file mode 100644
index 00000000..2198963b
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+description: Provides easy monitoring definitions for Kubernetes services, and deployment and management of Prometheus instances.
+icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
+engine: gotpl
+maintainers:
+ - name: gianrubio
+ email: gianrubio@gmail.com
+name: prometheus-operator
+sources:
+ - https://github.com/coreos/prometheus-operator
+ - https://coreos.com/operators/prometheus
+version: 4.1.1
+appVersion: 0.29.0
+home: https://github.com/coreos/prometheus-operator
+keywords:
+- operator
+- prometheus
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/README.md b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/README.md
new file mode 100644
index 00000000..8966f03a
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/README.md
@@ -0,0 +1,428 @@
+# prometheus-operator
+
+Installs [prometheus-operator](https://github.com/coreos/prometheus-operator) to create/configure/manage Prometheus clusters atop Kubernetes. This chart includes multiple components and is suitable for a variety of use-cases.
+
+The default installation is intended to suit monitoring the Kubernetes cluster the chart is deployed onto. It closely matches the kube-prometheus project.
+- [prometheus-operator](https://github.com/coreos/prometheus-operator)
+- [prometheus](https://prometheus.io/)
+- [alertmanager](https://prometheus.io/)
+- [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter)
+- [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics)
+- [grafana](https://github.com/helm/charts/tree/master/stable/grafana)
+- service monitors to scrape internal kubernetes components
+ - kube-apiserver
+ - kube-scheduler
+ - kube-controller-manager
+ - etcd
+ - kube-dns/coredns
+Along with these components, the chart also includes dashboards and alerts.
+
+The same chart can be used to run multiple Prometheus instances in the same cluster if required. To achieve this, the other components need to be disabled in the additional releases: only one instance of prometheus-operator and a single pair of alertmanager pods (for an HA configuration) should run in the cluster.
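A minimal sketch of a values file for such an additional Prometheus release, assuming the operator, Alertmanager, and exporters from the first release stay in place:

```yaml
# second-prometheus-values.yaml -- a sketch, not an exhaustive list of toggles
prometheusOperator:
  enabled: false   # reuse the operator from the first release
alertmanager:
  enabled: false   # reuse the existing Alertmanager pair
grafana:
  enabled: false
kubeStateMetrics:
  enabled: false
nodeExporter:
  enabled: false
prometheus:
  enabled: true
```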
+
+## TL;DR;
+
+```console
+$ helm install stable/prometheus-operator
+```
+
+## Introduction
+
+This chart bootstraps a [prometheus-operator](https://github.com/coreos/prometheus-operator) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. The chart can be installed multiple times to create separate Prometheus instances managed by Prometheus Operator.
+
+## Prerequisites
+ - Kubernetes 1.10+ with Beta APIs
+ - Helm 2.10+ (For a workaround using an earlier version see [below](#helm-210-workaround))
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm install --name my-release stable/prometheus-operator
+```
+
+The command deploys prometheus-operator on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+The default installation includes Prometheus Operator, Alertmanager, Grafana, and configuration for scraping Kubernetes infrastructure.
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+CRDs created by this chart are not removed by default and should be manually cleaned up:
+
+```
+kubectl delete crd prometheuses.monitoring.coreos.com
+kubectl delete crd prometheusrules.monitoring.coreos.com
+kubectl delete crd servicemonitors.monitoring.coreos.com
+kubectl delete crd alertmanagers.monitoring.coreos.com
+```
+
+## Configuration
+
+The following tables list the configurable parameters of the prometheus-operator chart and their default values.
+
+### General
+| Parameter | Description | Default |
+| ----- | ----------- | ------ |
+| `nameOverride` | Provide a name in place of `prometheus-operator` |`""`|
+| `fullNameOverride` | Provide a name to substitute for the full names of resources |`""`|
+| `commonLabels` | Labels to apply to all resources | `[]` |
+| `defaultRules.create` | Create default rules for monitoring the cluster | `true` |
+| `defaultRules.rules.alertmanager` | Create default rules for Alert Manager | `true` |
+| `defaultRules.rules.etcd` | Create default rules for ETCD | `true` |
+| `defaultRules.rules.general` | Create General default rules| `true` |
+| `defaultRules.rules.k8s` | Create K8S default rules| `true` |
+| `defaultRules.rules.kubeApiserver` | Create Api Server default rules| `true` |
+| `defaultRules.rules.kubePrometheusNodeAlerting` | Create Node Alerting default rules| `true` |
+| `defaultRules.rules.kubePrometheusNodeRecording` | Create Node Recording default rules| `true` |
+| `defaultRules.rules.kubeScheduler` | Create Kubernetes Scheduler default rules| `true` |
+| `defaultRules.rules.kubernetesAbsent` | Create Kubernetes Absent (example API Server down) default rules| `true` |
+| `defaultRules.rules.kubernetesApps` | Create Kubernetes Apps default rules| `true` |
+| `defaultRules.rules.kubernetesResources` | Create Kubernetes Resources default rules| `true` |
+| `defaultRules.rules.kubernetesStorage` | Create Kubernetes Storage default rules| `true` |
+| `defaultRules.rules.kubernetesSystem` | Create Kubernetes System default rules| `true` |
+| `defaultRules.rules.node` | Create Node default rules| `true` |
+| `defaultRules.rules.PrometheusOperator` | Create Prometheus Operator default rules| `true` |
+| `defaultRules.rules.prometheus` | Create Prometheus default rules| `true` |
+| `defaultRules.labels` | Labels for default rules for monitoring the cluster | `{}` |
+| `defaultRules.annotations` | Annotations for default rules for monitoring the cluster | `{}` |
+| `additionalPrometheusRules` | List of `prometheusRule` objects to create. See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusrulespec. | `[]` |
+| `global.rbac.create` | Create RBAC resources | `true` |
+| `global.rbac.pspEnabled` | Create pod security policy resources | `true` |
+| `global.imagePullSecrets` | Reference to one or more secrets to be used when pulling images | `[]` |
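The dotted parameter names above map directly onto nested keys in a values file (or `--set` flags). A small sketch using a few of the general parameters:

```yaml
defaultRules:
  create: true
  rules:
    etcd: false          # e.g. skip the etcd rules when etcd is not scraped
global:
  rbac:
    create: true
    pspEnabled: true
  imagePullSecrets: []
```

The same override could be expressed as `--set defaultRules.rules.etcd=false` on the `helm install` command line.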
+
+### Prometheus Operator
+| Parameter | Description | Default |
+| ----- | ----------- | ------ |
+| `prometheusOperator.enabled` | Deploy Prometheus Operator. Only one of these should be deployed into the cluster | `true` |
+| `prometheusOperator.serviceAccount` | Create a serviceaccount for the operator | `true` |
+| `prometheusOperator.name` | Operator serviceAccount name | `""` |
+| `prometheusOperator.logFormat` | Operator log output formatting | `"logfmt"` |
+| `prometheusOperator.logLevel` | Operator log level. Possible values: "all", "debug", "info", "warn", "error", "none" | `"info"` |
+| `prometheusOperator.createCustomResource` | Create CRDs. Required if deploying anything besides the operator itself as part of the release. The operator will create / update these on startup. If your Helm version < 2.10 you will have to either create the CRDs first or deploy the operator first, then the rest of the resources | `true` |
+| `prometheusOperator.crdApiGroup` | Specify the API Group for the CustomResourceDefinitions | `monitoring.coreos.com` |
+| `prometheusOperator.cleanupCustomResource` | Attempt to delete CRDs when the release is removed. This option may be useful while testing but is not recommended, as deleting the CRD definition will delete resources and prevent the operator from being able to clean up resources that it manages | `false` |
+| `prometheusOperator.podLabels` | Labels to add to the operator pod | `{}` |
+| `prometheusOperator.priorityClassName` | Name of Priority Class to assign pods | `nil` |
+| `prometheusOperator.kubeletService.enabled` | If true, the operator will create and maintain a service for scraping kubelets | `true` |
+| `prometheusOperator.kubeletService.namespace` | Namespace to deploy kubelet service | `kube-system` |
+| `prometheusOperator.serviceMonitor.selfMonitor` | Enable monitoring of prometheus operator | `true` |
+| `prometheusOperator.service.type` | Prometheus operator service type | `ClusterIP` |
+| `prometheusOperator.service.clusterIP` | Prometheus operator service clusterIP IP | `""` |
+| `prometheusOperator.service.nodePort` | Port to expose prometheus operator service on each node | `38080` |
+| `prometheusOperator.service.annotations` | Annotations to be added to the prometheus operator service | `{}` |
+| `prometheusOperator.service.labels` | Prometheus Operator Service Labels | `{}` |
+| `prometheusOperator.service.externalIPs` | List of IP addresses at which the Prometheus Operator server service is available | `[]` |
+| `prometheusOperator.service.loadBalancerIP` | Prometheus Operator Loadbalancer IP | `""` |
+| `prometheusOperator.service.loadBalancerSourceRanges` | Prometheus Operator Load Balancer Source Ranges | `[]` |
+| `prometheusOperator.resources` | Resource limits for prometheus operator | `{}` |
+| `prometheusOperator.securityContext` | SecurityContext for prometheus operator | `{"runAsNonRoot": true, "runAsUser": 65534}` |
+| `prometheusOperator.nodeSelector` | Prometheus operator node selector https://kubernetes.io/docs/user-guide/node-selection/ | `{}` |
+| `prometheusOperator.tolerations` | Tolerations for use with node taints https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | `[]` |
+| `prometheusOperator.affinity` | Assign the prometheus operator to run on specific nodes https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | `{}` |
+| `prometheusOperator.image.repository` | Repository for prometheus operator image | `quay.io/coreos/prometheus-operator` |
+| `prometheusOperator.image.tag` | Tag for prometheus operator image | `v0.29.0` |
+| `prometheusOperator.image.pullPolicy` | Pull policy for prometheus operator image | `IfNotPresent` |
+| `prometheusOperator.configmapReloadImage.repository` | Repository for configmapReload image | `quay.io/coreos/configmap-reload` |
+| `prometheusOperator.configmapReloadImage.tag` | Tag for configmapReload image | `v0.0.1` |
+| `prometheusOperator.prometheusConfigReloaderImage.repository` | Repository for config-reloader image | `quay.io/coreos/prometheus-config-reloader` |
+| `prometheusOperator.prometheusConfigReloaderImage.tag` | Tag for config-reloader image | `v0.29.0` |
+| `prometheusOperator.hyperkubeImage.repository` | Repository for hyperkube image used to perform maintenance tasks | `k8s.gcr.io/hyperkube` |
+| `prometheusOperator.hyperkubeImage.tag` | Tag for hyperkube image used to perform maintenance tasks | `v1.12.1` |
+| `prometheusOperator.hyperkubeImage.repository` | Image pull policy for hyperkube image used to perform maintenance tasks | `IfNotPresent` |
+
+### Prometheus
+| Parameter | Description | Default |
+| ----- | ----------- | ------ |
+| `prometheus.enabled` | Deploy prometheus | `true` |
+| `prometheus.serviceMonitor.selfMonitor` | Create a `serviceMonitor` to automatically monitor the prometheus instance | `true` |
+| `prometheus.serviceAccount.create` | Create a default serviceaccount for prometheus to use | `true` |
+| `prometheus.serviceAccount.name` | Name for prometheus serviceaccount | `""` |
+| `prometheus.rbac.roleNamespaces` | Create role bindings in the specified namespaces to allow Prometheus to monitor them; a role binding in the release namespace will always be created. | `["kube-system"]` |
+| `prometheus.podDisruptionBudget.enabled` | If true, create a pod disruption budget for prometheus pods. The created resource cannot be modified once created - it must be deleted to perform a change | `true` |
+| `prometheus.podDisruptionBudget.minAvailable` | Minimum number / percentage of pods that should remain scheduled | `1` |
+| `prometheus.podDisruptionBudget.maxUnavailable` | Maximum number / percentage of pods that may be made unavailable | `""` |
+| `prometheus.ingress.enabled` | If true, Prometheus Ingress will be created | `false` |
+| `prometheus.ingress.annotations` | Prometheus Ingress annotations | `{}` |
+| `prometheus.ingress.labels` | Prometheus Ingress additional labels | `{}` |
+| `prometheus.ingress.hosts` | Prometheus Ingress hostnames | `[]` |
+| `prometheus.ingress.tls` | Prometheus Ingress TLS configuration (YAML) | `[]` |
+| `prometheus.service.type` | Prometheus Service type | `ClusterIP` |
+| `prometheus.service.clusterIP` | Prometheus service clusterIP IP | `""` |
+| `prometheus.service.targetPort` | Prometheus Service internal port | `9090` |
+| `prometheus.service.nodePort` | Prometheus Service port for NodePort service type | `39090` |
+| `prometheus.service.annotations` | Prometheus Service Annotations | `{}` |
+| `prometheus.service.labels` | Prometheus Service Labels | `{}` |
+| `prometheus.service.externalIPs` | List of IP addresses at which the Prometheus server service is available | `[]` |
+| `prometheus.service.loadBalancerIP` | Prometheus Loadbalancer IP | `""` |
+| `prometheus.service.loadBalancerSourceRanges` | Prometheus Load Balancer Source Ranges | `[]` |
+| `prometheus.service.sessionAffinity` | Prometheus Service Session Affinity | `""` |
+| `prometheus.additionalServiceMonitors` | List of `serviceMonitor` objects to create. See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitorspec | `[]` |
+| `prometheus.prometheusSpec.podMetadata` | Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata Metadata Labels and Annotations gets propagated to the prometheus pods. | `{}` |
+| `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` | If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the prometheus resource to be created with selectors based on values in the helm deployment, which will also match the servicemonitors created | `true` |
+| `prometheus.prometheusSpec.serviceMonitorSelector` | ServiceMonitors to be selected for target discovery. If {}, select all ServiceMonitors | `{}` |
+| `prometheus.prometheusSpec.serviceMonitorNamespaceSelector` | Namespaces to be selected for ServiceMonitor discovery. See [namespaceSelector](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector) for usage | `{}` |
+| `prometheus.prometheusSpec.image.repository` | Base image to use for a Prometheus deployment. | `quay.io/prometheus/prometheus` |
+| `prometheus.prometheusSpec.image.tag` | Tag of Prometheus container image to be deployed. | `v2.7.1` |
+| `prometheus.prometheusSpec.paused` | When a Prometheus deployment is paused, no actions except for deletion will be performed on the underlying objects. | `false` |
+| `prometheus.prometheusSpec.replicas` | Number of instances to deploy for a Prometheus deployment. | `1` |
+| `prometheus.prometheusSpec.retention` | Time duration Prometheus shall retain data for. Must match the regular expression `[0-9]+(ms\|s\|m\|h\|d\|w\|y)` (milliseconds seconds minutes hours days weeks years). | `120h` |
+| `prometheus.prometheusSpec.logLevel` | Log level for Prometheus to be configured with. | `info` |
+| `prometheus.prometheusSpec.scrapeInterval` | Interval between consecutive scrapes. | `""` |
+| `prometheus.prometheusSpec.evaluationInterval` | Interval between consecutive evaluations. | `""` |
+| `prometheus.prometheusSpec.externalLabels` | The labels to add to any time series or alerts when communicating with external systems (federation, remote storage, Alertmanager). | `[]` |
+| `prometheus.prometheusSpec.externalUrl` | The external URL the Prometheus instances will be available under. This is necessary to generate correct URLs. This is necessary if Prometheus is not served from root of a DNS name. | `""` |
+| `prometheus.prometheusSpec.routePrefix` | The route prefix Prometheus registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, but the server serves requests under a different route prefix. For example for use with `kubectl proxy`. | `/` |
+| `prometheus.prometheusSpec.storageSpec` | Storage spec to specify how storage shall be used. | `{}` |
+| `prometheus.prometheusSpec.ruleSelectorNilUsesHelmValues` | If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the prometheus resource to be created with selectors based on values in the helm deployment, which will also match the PrometheusRule resources created. | `true` |
+| `prometheus.prometheusSpec.ruleSelector` | A selector to select which PrometheusRules to mount for loading alerting rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom resources selected by RuleSelector. Make sure it does not match any config maps that you do not want to be migrated. If {}, select all PrometheusRules | `{}` |
+| `prometheus.prometheusSpec.ruleNamespaceSelector` | Namespaces to be selected for PrometheusRules discovery. If nil, select own namespace. See [namespaceSelector](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector) for usage | `{}` |
+| `prometheus.prometheusSpec.alertingEndpoints` | Alertmanagers to which alerts will be sent https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints Default configuration will connect to the alertmanager deployed as part of this release | `[]` |
+| `prometheus.prometheusSpec.resources` | Define resources requests and limits for single Pods. | `{}` |
+| `prometheus.prometheusSpec.nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` |
+| `prometheus.prometheusSpec.secrets` | Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. The Secrets are mounted into /etc/prometheus/secrets/<secret-name>. Secrets changes after initial creation of a Prometheus object are not reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated with the new list of secrets. | `[]` |
+| `prometheus.prometheusSpec.configMaps` | ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/ | `[]` |
+| `prometheus.prometheusSpec.podAntiAffinity` | Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. | `""` |
+| `prometheus.prometheusSpec.podAntiAffinityTopologyKey` | If anti-affinity is enabled sets the topologyKey to use for anti-affinity. This can be changed to, for example `failure-domain.beta.kubernetes.io/zone` | `kubernetes.io/hostname` |
+| `prometheus.prometheusSpec.tolerations` | If specified, the pod's tolerations. | `[]` |
+| `prometheus.prometheusSpec.remoteWrite` | If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way. | `[]` |
+| `prometheus.prometheusSpec.remoteRead` | If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way. | `[]` |
+| `prometheus.prometheusSpec.securityContext` | SecurityContext holds pod-level security attributes and common container settings. This defaults to non root user with uid 1000 and gid 2000 in order to support migration from operator version <0.26. | `{"runAsNonRoot": true, "runAsUser": 1000, "fsGroup": 2000}` |
+| `prometheus.prometheusSpec.listenLocal` | ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. | `false` |
+| `prometheus.prometheusSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. |`[]`|
+| `prometheus.prometheusSpec.additionalScrapeConfigs` | AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>. As scrape configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible scrape configs are going to break Prometheus after the upgrade. | `{}` |
+| `prometheus.prometheusSpec.additionalScrapeConfigsExternal` | Enable additional scrape configs that are managed externally to this chart. Note that the prometheus will fail to provision if the correct secret does not exist. | `false` |
+| `prometheus.prometheusSpec.additionalAlertManagerConfigs` | AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<alertmanager_config>. AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator. As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade. | `{}` |
+| `prometheus.prometheusSpec.additionalAlertRelabelConfigs` | AdditionalAlertRelabelConfigs allows specifying additional Prometheus alert relabel configurations. Alert relabel configurations specified are appended to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel configs are going to break Prometheus after the upgrade. | `[]` |
+| `prometheus.prometheusSpec.thanos` | Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment. This section is experimental, it may change significantly without deprecation notice in any release.This is experimental and may change significantly without backward compatibility in any release. See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#thanosspec | `{}` |
+| `prometheus.prometheusSpec.priorityClassName` | Priority class assigned to the Pods | `""` |
+
+### Alertmanager
+| Parameter | Description | Default |
+| ----- | ----------- | ------ |
+| `alertmanager.enabled` | Deploy alertmanager | `true` |
+| `alertmanager.serviceMonitor.selfMonitor` | Create a `serviceMonitor` to automatically monitor the alertmanager instance | `true` |
+| `alertmanager.serviceAccount.create` | Create a `serviceAccount` for alertmanager | `true` |
+| `alertmanager.serviceAccount.name` | Name for Alertmanager service account | `""` |
+| `alertmanager.podDisruptionBudget.enabled` | If true, create a pod disruption budget for Alertmanager pods. The created resource cannot be modified once created - it must be deleted to perform a change | `true` |
+| `alertmanager.podDisruptionBudget.minAvailable` | Minimum number / percentage of pods that should remain scheduled | `1` |
+| `alertmanager.podDisruptionBudget.maxUnavailable` | Maximum number / percentage of pods that may be made unavailable | `""` |
+| `alertmanager.ingress.enabled` | If true, Alertmanager Ingress will be created | `false` |
+| `alertmanager.ingress.annotations` | Alertmanager Ingress annotations | `{}` |
+| `alertmanager.ingress.labels` | Alertmanager Ingress additional labels | `{}` |
+| `alertmanager.ingress.hosts` | Alertmanager Ingress hostnames | `[]` |
+| `alertmanager.ingress.tls` | Alertmanager Ingress TLS configuration (YAML) | `[]` |
+| `alertmanager.service.type` | Alertmanager Service type | `ClusterIP` |
+| `alertmanager.service.clusterIP` | Alertmanager service clusterIP IP | `""` |
+| `alertmanager.service.nodePort` | Alertmanager Service port for NodePort service type | `30903` |
+| `alertmanager.service.annotations` | Alertmanager Service annotations | `{}` |
+| `alertmanager.service.labels` | Alertmanager Service Labels | `{}` |
+| `alertmanager.service.externalIPs` | List of IP addresses at which the Alertmanager server service is available | `[]` |
+| `alertmanager.service.loadBalancerIP` | Alertmanager Loadbalancer IP | `""` |
+| `alertmanager.service.loadBalancerSourceRanges` | Alertmanager Load Balancer Source Ranges | `[]` |
+| `alertmanager.config` | Provide YAML to configure Alertmanager. See https://prometheus.io/docs/alerting/configuration/#configuration-file. The default provided works to suppress the Watchdog alert from `defaultRules.create` | `{"global":{"resolve_timeout":"5m"},"route":{"group_by":["job"],"group_wait":"30s","group_interval":"5m","repeat_interval":"12h","receiver":"null","routes":[{"match":{"alertname":"Watchdog"},"receiver":"null"}]},"receivers":[{"name":"null"}]}` |
+| `alertmanager.alertmanagerSpec.podMetadata` | Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata Metadata Labels and Annotations gets propagated to the prometheus pods. | `{}` |
+| `alertmanager.alertmanagerSpec.image.tag` | Tag of Alertmanager container image to be deployed. | `v0.16.1` |
+| `alertmanager.alertmanagerSpec.image.repository` | Base image that is used to deploy pods, without tag. | `quay.io/prometheus/alertmanager` |
+| `alertmanager.alertmanagerSpec.secrets` | Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/<secret-name>. | `[]` |
+| `alertmanager.alertmanagerSpec.configMaps` | ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/ | `[]` |
+| `alertmanager.alertmanagerSpec.logLevel` | Log level for Alertmanager to be configured with. | `info` |
+| `alertmanager.alertmanagerSpec.replicas` | Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the running cluster equal to the expected size. | `1` |
+| `alertmanager.alertmanagerSpec.retention` | Time duration Alertmanager shall retain data for. Value must match the regular expression `[0-9]+(ms\|s\|m\|h)` (milliseconds seconds minutes hours). | `120h` |
+| `alertmanager.alertmanagerSpec.storage` | Storage is the definition of how storage will be used by the Alertmanager instances. | `{}` |
+| `alertmanager.alertmanagerSpec.externalUrl` | The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. | `""` |
+| `alertmanager.alertmanagerSpec.routePrefix` | The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, but the server serves requests under a different route prefix. For example for use with `kubectl proxy`. | `/` |
+| `alertmanager.alertmanagerSpec.paused` | If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. | `false` |
+| `alertmanager.alertmanagerSpec.nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` |
+| `alertmanager.alertmanagerSpec.resources` | Define resources requests and limits for single Pods. | `{}` |
+| `alertmanager.alertmanagerSpec.podAntiAffinity` | Pod anti-affinity can prevent the scheduler from placing Alertmanager replicas on the same node. The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. | `""` |
+| `alertmanager.alertmanagerSpec.podAntiAffinityTopologyKey` | If anti-affinity is enabled sets the topologyKey to use for anti-affinity. This can be changed to, for example `failure-domain.beta.kubernetes.io/zone` | `kubernetes.io/hostname` |
+| `alertmanager.alertmanagerSpec.tolerations` | If specified, the pod's tolerations. | `[]` |
+| `alertmanager.alertmanagerSpec.securityContext` | SecurityContext holds pod-level security attributes and common container settings. This defaults to non root user with uid 1000 and gid 2000 in order to support migration from operator version < 0.26 | `{"runAsNonRoot": true, "runAsUser": 1000, "fsGroup": 2000}` |
+| `alertmanager.alertmanagerSpec.listenLocal` | ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. Note this is only for the Alertmanager UI, not the gossip communication. | `false` |
+| `alertmanager.alertmanagerSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. | `[]` |
+| `alertmanager.alertmanagerSpec.priorityClassName` | Priority class assigned to the Pods | `""` |
+| `alertmanager.alertmanagerSpec.additionalPeers` | AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. | `[]` |
+
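For reference, the documented default for `alertmanager.config` above corresponds to the following values-file snippet (it only routes the Watchdog alert to a `null` receiver):

```yaml
alertmanager:
  config:
    global:
      resolve_timeout: 5m
    route:
      group_by: ['job']
      group_wait: 30s
      group_interval: 5m
      repeat_interval: 12h
      receiver: 'null'
      routes:
      - match:
          alertname: Watchdog
        receiver: 'null'
    receivers:
    - name: 'null'
```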
+### Grafana
+| Parameter | Description | Default |
+| ----- | ----------- | ------ |
+| `grafana.enabled` | If true, deploy the grafana sub-chart | `true` |
+| `grafana.serviceMonitor.selfMonitor` | Create a `serviceMonitor` to automatically monitor the grafana instance | `true` |
+| `grafana.adminPassword` | Admin password to log into the grafana UI | "prom-operator" |
+| `grafana.defaultDashboardsEnabled` | Deploy default dashboards. These are loaded using the sidecar | `true` |
+| `grafana.ingress.enabled` | Enables Ingress for Grafana | `false` |
+| `grafana.ingress.annotations` | Ingress annotations for Grafana | `{}` |
+| `grafana.ingress.labels` | Custom labels for Grafana Ingress | `{}` |
+| `grafana.ingress.hosts` | Ingress accepted hostnames for Grafana| `[]` |
+| `grafana.ingress.tls` | Ingress TLS configuration for Grafana | `[]` |
+| `grafana.sidecar.dashboards.enabled` | Enable the Grafana sidecar to automatically load dashboards with a label `{{ grafana.sidecar.dashboards.label }}=1` | `true` |
+| `grafana.sidecar.dashboards.label` | If the sidecar is enabled, configmaps with this label will be loaded into Grafana as dashboards | `grafana_dashboard` |
+| `grafana.sidecar.datasources.enabled` | Enable the Grafana sidecar to automatically load dashboards with a label `{{ grafana.sidecar.datasources.label }}=1` | `true` |
+| `grafana.sidecar.datasources.label` | If the sidecar is enabled, configmaps with this label will be loaded into Grafana as datasources configurations | `grafana_datasource` |
+| `grafana.rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires rbac.pspEnabled) | `true` |
+| `grafana.extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` |
+
+### Exporters
+| Parameter | Description | Default |
+| ----- | ----------- | ------ |
+| `kubeApiServer.enabled` | Deploy `serviceMonitor` to scrape the Kubernetes API server | `true` |
+| `kubeApiServer.relabelings` | Relabelings for the API Server ServiceMonitor | `[]` |
+| `kubeApiServer.tlsConfig.serverName` | Name of the server to use when validating TLS certificate | `kubernetes` |
+| `kubeApiServer.tlsConfig.insecureSkipVerify` | Skip TLS certificate validation when scraping | `false` |
+| `kubeApiServer.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus | `component` |
+| `kubeApiServer.serviceMonitor.selector` | The service selector | `{"matchLabels":{"component":"apiserver","provider":"kubernetes"}}`
+| `kubelet.enabled` | Deploy servicemonitor to scrape the kubelet service. See also `prometheusOperator.kubeletService` | `true` |
+| `kubelet.namespace` | Namespace where the kubelet is deployed. See also `prometheusOperator.kubeletService.namespace` | `kube-system` |
+| `kubelet.serviceMonitor.https` | Enable scraping of the kubelet over HTTPS. For more information, see https://github.com/coreos/prometheus-operator/issues/926 | `false` |
+| `kubelet.serviceMonitor.cAdvisorMetricRelabelings` | The `metric_relabel_configs` for scraping cAdvisor. | `` |
+| `kubeControllerManager.enabled` | Deploy a `service` and `serviceMonitor` to scrape the Kubernetes controller-manager | `true` |
+| `kubeControllerManager.endpoints` | Endpoints where Controller-manager runs. Provide this if running Controller-manager outside the cluster | `[]` |
+| `kubeControllerManager.service.port` | Port that the controller-manager service runs on | `10252` |
+| `kubeControllerManager.service.targetPort` | Target port that the controller-manager service points to | `10252` |
+| `kubeControllerManager.service.selector` | Controller-manager service selector | `{"k8s-app" : "kube-controller-manager" }` |
+| `coreDns.enabled` | Deploy coreDns scraping components. Use either this or kubeDns | `true` |
+| `coreDns.service.port` | CoreDns port | `9153` |
+| `coreDns.service.targetPort` | CoreDns targetPort | `9153` |
+| `coreDns.service.selector` | CoreDns service selector | `{"k8s-app" : "coredns" }` |
+| `kubeDns.enabled` | Deploy kubeDns scraping components. Use either this or coreDns | `false` |
+| `kubeDns.service.selector` | KubeDns service selector | `{"k8s-app" : "kube-dns" }` |
+| `kubeEtcd.enabled` | Deploy components to scrape etcd | `true` |
+| `kubeEtcd.endpoints` | Endpoints where etcd runs. Provide this if running etcd outside the cluster | `[]` |
+| `kubeEtcd.service.port` | Etcd port | `4001` |
+| `kubeEtcd.service.targetPort` | Etcd targetPort | `4001` |
+| `kubeEtcd.service.selector` | Selector for etcd if running inside the cluster | `{"k8s-app":"etcd-server"}` |
+| `kubeEtcd.serviceMonitor.scheme` | Etcd servicemonitor scheme | `http` |
+| `kubeEtcd.serviceMonitor.insecureSkipVerify` | Skip validating etcd TLS certificate when scraping | `false` |
+| `kubeEtcd.serviceMonitor.serverName` | Etcd server name to validate certificate against when scraping | `""` |
+| `kubeEtcd.serviceMonitor.caFile` | Certificate authority file to use when connecting to etcd. See `prometheus.prometheusSpec.secrets` | `""` |
+| `kubeEtcd.serviceMonitor.certFile` | Client certificate file to use when connecting to etcd. See `prometheus.prometheusSpec.secrets` | `""` |
+| `kubeEtcd.serviceMonitor.keyFile` | Client key file to use when connecting to etcd. See `prometheus.prometheusSpec.secrets` | `""` |
+| `kubeScheduler.enabled` | Deploy a `service` and `serviceMonitor` to scrape the Kubernetes scheduler | `true` |
+| `kubeScheduler.endpoints` | Endpoints where scheduler runs. Provide this if running scheduler outside the cluster | `[]` |
+| `kubeScheduler.service.port` | Port that the scheduler service runs on | `10251` |
+| `kubeScheduler.service.targetPort` | Target port that the scheduler service points to | `10251` |
+| `kubeScheduler.service.selector` | Scheduler service selector | `{"k8s-app" : "kube-scheduler" }` |
+| `kubeStateMetrics.enabled` | Deploy the `kube-state-metrics` chart and configure a servicemonitor to scrape | `true` |
+| `kube-state-metrics.rbac.create` | Create RBAC components in kube-state-metrics. See `global.rbac.create` | `true` |
+| `kube-state-metrics.podSecurityPolicy.enabled` | Create pod security policy resource for kube-state-metrics. | `true` |
+| `nodeExporter.enabled` | Deploy the `prometheus-node-exporter` and scrape it | `true` |
+| `nodeExporter.jobLabel` | The name of the label on the target service to use as the job name in prometheus. See the `prometheus-node-exporter.podLabels` default of `jobLabel: node-exporter` | `jobLabel` |
+| `prometheus-node-exporter.podLabels` | Additional labels for pods in the DaemonSet | `{"jobLabel":"node-exporter"}` |
+| `prometheus-node-exporter.extraArgs` | Additional arguments for the node exporter container | `["--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)", "--collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$"]` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install --name my-release stable/prometheus-operator --set prometheusOperator.enabled=true
+```
+
+Alternatively, one or more YAML files that specify the values for the above parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install --name my-release stable/prometheus-operator -f values1.yaml,values2.yaml
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+
+## Developing Prometheus Rules and Grafana Dashboards
+
+The Grafana dashboards and Prometheus rules in this chart are copied from coreos/prometheus-operator and other sources, and are synced (with alterations) by the scripts in the [hack](hack) folder. To introduce any changes, first [add them to the original repo](https://github.com/coreos/prometheus-operator/blob/master/contrib/kube-prometheus/docs/developing-prometheus-rules-and-grafana-dashboards.md) and then sync them here with those scripts.
+
+## Further Information
+
+For more in-depth documentation of configuration options meanings, please see
+- [Prometheus Operator](https://github.com/coreos/prometheus-operator)
+- [Prometheus](https://prometheus.io/docs/introduction/overview/)
+- [Grafana](https://github.com/helm/charts/tree/master/stable/grafana#grafana-helm-chart)
+
+## Helm <2.10 workaround
+The `crd-install` hook is required to deploy the prometheus operator CRDs before they are used. If you are forced to use an earlier version of Helm, you can work around this requirement as follows:
+1. Install prometheus-operator by itself, disabling everything but the prometheus-operator component, and also setting `prometheusOperator.serviceMonitor.selfMonitor=false`.
+2. Install all the other components, and configure `prometheus.additionalServiceMonitors` to scrape the prometheus-operator service, as sketched below.
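+
+A minimal sketch of this two-step install (the release names and the extra values file are hypothetical, and `prometheus.enabled` is assumed to exist alongside the `*.enabled` flags listed in the tables above) might look like:
+
+```console
+$ # Step 1: operator only, with self-monitoring disabled
+$ helm install --name prom-op stable/prometheus-operator \
+    --set prometheusOperator.serviceMonitor.selfMonitor=false \
+    --set prometheus.enabled=false \
+    --set alertmanager.enabled=false \
+    --set grafana.enabled=false \
+    --set nodeExporter.enabled=false \
+    --set kubeStateMetrics.enabled=false
+
+$ # Step 2: everything else in a second release, with the operator itself disabled
+$ helm install --name prom-stack stable/prometheus-operator \
+    --set prometheusOperator.enabled=false \
+    -f additional-servicemonitors.yaml
+```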
+
+# Migrating from coreos/prometheus-operator chart
+
+The multiple charts have been combined into a single chart that installs prometheus-operator, prometheus, alertmanager and grafana, as well as the multitude of exporters necessary to monitor a cluster.
+
+There is no simple and direct migration path between the charts as the changes are extensive and intended to make the chart easier to support.
+
+The capabilities of the old chart are all available in the new chart, including the ability to run multiple prometheus instances on a single cluster; you will need to disable the parts of the chart you do not wish to deploy, for example as shown below.
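+
+For instance, individual components can be switched off at install time with the `*.enabled` flags from the tables above (the release name below is just a placeholder):
+
+```console
+$ helm install --name my-release stable/prometheus-operator \
+    --set alertmanager.enabled=false \
+    --set grafana.enabled=false \
+    --set kubeEtcd.enabled=false
+```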
+
+You can check out the tickets for this change [here](https://github.com/coreos/prometheus-operator/issues/592) and [here](https://github.com/helm/charts/pull/6765).
+
+## High-level overview of Changes
+The chart has three dependencies, which can be seen in the chart's requirements file:
+https://github.com/helm/charts/blob/master/stable/prometheus-operator/requirements.yaml
+
+### Node-Exporter, Kube-State-Metrics
+These components are loaded as dependencies into the chart. The source for both charts is found in the same repository. They are relatively simple components.
+
+### Grafana
+The Grafana chart is more feature-rich than this chart: it contains a sidecar that is able to load data sources and dashboards from configmaps deployed into the same cluster (see the sketch below). For more information, check out the [documentation for the chart](https://github.com/helm/charts/tree/master/stable/grafana).
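+
+As a hedged example, assuming the chart is installed in the `monitoring` namespace and the default sidecar label from the tables above, a hypothetical dashboard export `my-dashboard.json` could be loaded by creating and labelling a configmap:
+
+```console
+$ kubectl --namespace monitoring create configmap my-dashboard --from-file=my-dashboard.json
+$ kubectl --namespace monitoring label configmap my-dashboard grafana_dashboard=1
+```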
+
+### Coreos CRDs
+The CRDs are provisioned using crd-install hooks, rather than relying on a separate chart installation. If you already have these CRDs provisioned and don't want to remove them, you can disable CRD creation by these hooks by passing `prometheusOperator.createCustomResource=false`.
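+
+For example (the release name here is hypothetical):
+
+```console
+$ helm install --name my-release stable/prometheus-operator \
+    --set prometheusOperator.createCustomResource=false
+```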
+
+### Kubelet Service
+Because the kubelet service has a new name in the chart, make sure to clean up the old kubelet service in the `kube-system` namespace to prevent counting container metrics twice.
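+
+A hedged sketch of that cleanup, assuming the old service kept the default name `kubelet` (verify the name against the listing first):
+
+```console
+$ kubectl --namespace kube-system get services
+$ kubectl --namespace kube-system delete service kubelet
+```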
+
+### Persistent Volumes
+If you would like to keep the data of the current persistent volumes, it should be possible to attach existing volumes to new PVCs and PVs that are created using the conventions in the new chart. For example, in order to use an existing Azure disk for a Helm release called `prometheus-migration`, the following resources can be created:
+```yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pvc-prometheus-migration-prometheus-0
+spec:
+ accessModes:
+ - ReadWriteOnce
+ azureDisk:
+ cachingMode: None
+ diskName: pvc-prometheus-migration-prometheus-0
+ diskURI: /subscriptions/f5125d82-2622-4c50-8d25-3f7ba3e9ac4b/resourceGroups/sample-migration-resource-group/providers/Microsoft.Compute/disks/pvc-prometheus-migration-prometheus-0
+ fsType: ""
+ kind: Managed
+ readOnly: false
+ capacity:
+ storage: 1Gi
+ persistentVolumeReclaimPolicy: Delete
+ storageClassName: prometheus
+ volumeMode: Filesystem
+```
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ labels:
+ app: prometheus
+ prometheus: prometheus-migration-prometheus
+ name: prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0
+ namespace: monitoring
+spec:
+ accessModes:
+ - ReadWriteOnce
+ dataSource: null
+ resources:
+ requests:
+ storage: 1Gi
+ storageClassName: prometheus
+ volumeMode: Filesystem
+ volumeName: pvc-prometheus-migration-prometheus-0
+status:
+ accessModes:
+ - ReadWriteOnce
+ capacity:
+ storage: 1Gi
+```
+
+The PVC will take ownership of the PV, and when you create a release using a persistent volume claim template, it will use the existing PVCs because they match the naming convention used by the chart. Similar approaches can be used for other cloud providers.
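+
+To confirm the binding worked, you can check that the PV reports `Bound` and that the PVC (names taken from the manifests above) picked up the pre-created volume:
+
+```console
+$ kubectl get pv pvc-prometheus-migration-prometheus-0
+$ kubectl --namespace monitoring get pvc prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0
+```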
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/NOTES.txt b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/NOTES.txt
new file mode 100644
index 00000000..b5aec761
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/NOTES.txt
@@ -0,0 +1,5 @@
+The Prometheus Operator has been installed. Check its status by running:
+ kubectl --namespace {{ .Release.Namespace }} get pods -l "release={{ .Release.Name }}"
+
+Visit https://github.com/coreos/prometheus-operator for instructions on how
+to create & configure Alertmanager and Prometheus instances using the Operator. \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/_helpers.tpl b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/_helpers.tpl
new file mode 100644
index 00000000..6ec1fa2b
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/_helpers.tpl
@@ -0,0 +1,91 @@
+{{/* vim: set filetype=mustache: */}}
+{{/* Expand the name of the chart. The name is suffixed with -alertmanager, so subtract 13 from the 63 characters available. */}}
+{{- define "prometheus-operator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 50 | trimSuffix "-" -}}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+The components in this chart create additional resources that expand the longest created name strings.
+The longest name that gets created adds an extra 37 characters, so truncation should be 63-37=26.
+*/}}
+{{- define "prometheus-operator.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 26 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 26 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 26 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Fullname suffixed with operator */}}
+{{- define "prometheus-operator.operator.fullname" -}}
+{{- printf "%s-operator" (include "prometheus-operator.fullname" .) -}}
+{{- end }}
+
+{{/* Fullname suffixed with prometheus */}}
+{{- define "prometheus-operator.prometheus.fullname" -}}
+{{- printf "%s-prometheus" (include "prometheus-operator.fullname" .) -}}
+{{- end }}
+
+{{/* Fullname suffixed with alertmanager */}}
+{{- define "prometheus-operator.alertmanager.fullname" -}}
+{{- printf "%s-alertmanager" (include "prometheus-operator.fullname" .) -}}
+{{- end }}
+
+{{/* Create chart name and version as used by the chart label. */}}
+{{- define "prometheus-operator.chartref" -}}
+{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}}
+{{- end }}
+
+{{/* Generate basic labels */}}
+{{- define "prometheus-operator.labels" }}
+chart: {{ template "prometheus-operator.chartref" . }}
+release: {{ .Release.Name | quote }}
+heritage: {{ .Release.Service | quote }}
+{{- if .Values.commonLabels}}
+{{ toYaml .Values.commonLabels }}
+{{- end }}
+{{- end }}
+
+{{/* Create the name of prometheus-operator service account to use */}}
+{{- define "prometheus-operator.operator.serviceAccountName" -}}
+{{- if and .Values.global.rbac.create .Values.prometheusOperator.serviceAccount.create -}}
+ {{ default (include "prometheus-operator.operator.fullname" .) .Values.prometheusOperator.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.prometheusOperator.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/* Create the name of prometheus service account to use */}}
+{{- define "prometheus-operator.prometheus.serviceAccountName" -}}
+{{- if and .Values.global.rbac.create .Values.prometheus.serviceAccount.create -}}
+ {{ default (include "prometheus-operator.prometheus.fullname" .) .Values.prometheus.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.prometheus.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/* Create the name of alertmanager service account to use */}}
+{{- define "prometheus-operator.alertmanager.serviceAccountName" -}}
+{{- if and .Values.global.rbac.create .Values.alertmanager.serviceAccount.create -}}
+ {{ default (include "prometheus-operator.alertmanager.fullname" .) .Values.alertmanager.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.alertmanager.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/* Workaround for https://github.com/helm/helm/issues/3117 */}}
+{{- define "prometheus-operator.rangeskipempty" -}}
+{{- range $key, $value := . }}
+{{- if $value }}
+{{ $key }}: {{ $value }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/alertmanager.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/alertmanager.yaml
new file mode 100644
index 00000000..24f93847
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/alertmanager.yaml
@@ -0,0 +1,100 @@
+{{- if .Values.alertmanager.enabled }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: Alertmanager
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-alertmanager
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-alertmanager
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+{{- if .Values.alertmanager.alertmanagerSpec.image }}
+ baseImage: {{ .Values.alertmanager.alertmanagerSpec.image.repository }}
+ version: {{ .Values.alertmanager.alertmanagerSpec.image.tag }}
+{{- end }}
+ replicas: {{ .Values.alertmanager.alertmanagerSpec.replicas }}
+ listenLocal: {{ .Values.alertmanager.alertmanagerSpec.listenLocal }}
+ serviceAccountName: {{ template "prometheus-operator.alertmanager.serviceAccountName" . }}
+{{- if .Values.alertmanager.alertmanagerSpec.externalUrl }}
+  externalUrl: "{{ .Values.alertmanager.alertmanagerSpec.externalUrl }}"
+{{- else if .Values.alertmanager.ingress.enabled }}
+ externalUrl: "http://{{ index .Values.alertmanager.ingress.hosts 0 }}{{ .Values.alertmanager.alertmanagerSpec.routePrefix }}"
+{{- else }}
+ externalUrl: http://{{ template "prometheus-operator.fullname" . }}-alertmanager.{{ .Release.Namespace }}:9093
+{{- end }}
+{{- if .Values.alertmanager.alertmanagerSpec.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.alertmanager.alertmanagerSpec.nodeSelector | indent 4 }}
+{{- end }}
+ paused: {{ .Values.alertmanager.alertmanagerSpec.paused }}
+ logLevel: {{ .Values.alertmanager.alertmanagerSpec.logLevel | quote }}
+ retention: {{ .Values.alertmanager.alertmanagerSpec.retention | quote }}
+{{- if .Values.alertmanager.alertmanagerSpec.secrets }}
+ secrets:
+{{ toYaml .Values.alertmanager.alertmanagerSpec.secrets | indent 4 }}
+{{- end }}
+{{- if .Values.alertmanager.alertmanagerSpec.configMaps }}
+ configMaps:
+{{ toYaml .Values.alertmanager.alertmanagerSpec.configMaps | indent 4 }}
+{{- end }}
+{{- if .Values.alertmanager.alertmanagerSpec.resources }}
+ resources:
+{{ toYaml .Values.alertmanager.alertmanagerSpec.resources | indent 4 }}
+{{- end }}
+{{- if .Values.alertmanager.alertmanagerSpec.routePrefix }}
+ routePrefix: "{{ .Values.alertmanager.alertmanagerSpec.routePrefix }}"
+{{- end }}
+{{- if .Values.alertmanager.alertmanagerSpec.securityContext }}
+ securityContext:
+{{ toYaml .Values.alertmanager.alertmanagerSpec.securityContext | indent 4 }}
+{{- end }}
+{{- if .Values.alertmanager.alertmanagerSpec.storage }}
+ storage:
+{{ toYaml .Values.alertmanager.alertmanagerSpec.storage | indent 4 }}
+{{- end }}
+{{- if .Values.alertmanager.alertmanagerSpec.podMetadata }}
+ podMetadata:
+{{ toYaml .Values.alertmanager.alertmanagerSpec.podMetadata | indent 4 }}
+{{- end }}
+{{- if eq .Values.alertmanager.alertmanagerSpec.podAntiAffinity "hard" }}
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: {{ .Values.alertmanager.alertmanagerSpec.podAntiAffinityTopologyKey }}
+ labelSelector:
+ matchLabels:
+ app: alertmanager
+ alertmanager: {{ template "prometheus-operator.fullname" . }}-alertmanager
+{{- else if eq .Values.alertmanager.alertmanagerSpec.podAntiAffinity "soft" }}
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ topologyKey: {{ .Values.alertmanager.alertmanagerSpec.podAntiAffinityTopologyKey }}
+ labelSelector:
+ matchLabels:
+ app: alertmanager
+ alertmanager: {{ template "prometheus-operator.fullname" . }}-alertmanager
+{{- end }}
+{{- if .Values.alertmanager.alertmanagerSpec.tolerations }}
+ tolerations:
+{{ toYaml .Values.alertmanager.alertmanagerSpec.tolerations | indent 4 }}
+{{- end }}
+{{- if .Values.global.imagePullSecrets }}
+ imagePullSecrets:
+{{ toYaml .Values.global.imagePullSecrets | indent 4 }}
+{{- end }}
+{{- if .Values.alertmanager.alertmanagerSpec.containers }}
+ containers:
+{{ toYaml .Values.alertmanager.alertmanagerSpec.containers | indent 4 }}
+{{- end }}
+{{- if .Values.alertmanager.alertmanagerSpec.priorityClassName }}
+  priorityClassName: {{ .Values.alertmanager.alertmanagerSpec.priorityClassName }}
+{{- end }}
+{{- if .Values.alertmanager.alertmanagerSpec.additionalPeers }}
+  additionalPeers:
+{{ toYaml .Values.alertmanager.alertmanagerSpec.additionalPeers | indent 4 }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/ingress.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/ingress.yaml
new file mode 100644
index 00000000..fd657f71
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/ingress.yaml
@@ -0,0 +1,33 @@
+{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled }}
+{{- $routePrefix := .Values.alertmanager.alertmanagerSpec.routePrefix }}
+{{- $serviceName := printf "%s-%s" (include "prometheus-operator.fullname" .) "alertmanager" }}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ $serviceName }}
+{{- if .Values.alertmanager.ingress.annotations }}
+ annotations:
+{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }}
+{{- end }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-alertmanager
+{{- if .Values.alertmanager.ingress.labels }}
+{{ toYaml .Values.alertmanager.ingress.labels | indent 4 }}
+{{- end }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ rules:
+ {{- range $host := .Values.alertmanager.ingress.hosts }}
+ - host: {{ . }}
+ http:
+ paths:
+ - path: "{{ $routePrefix }}"
+ backend:
+ serviceName: {{ $serviceName }}
+ servicePort: 9093
+ {{- end }}
+{{- if .Values.alertmanager.ingress.tls }}
+ tls:
+{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }}
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/podDisruptionBudget.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/podDisruptionBudget.yaml
new file mode 100644
index 00000000..f240fe76
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/podDisruptionBudget.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.alertmanager.enabled .Values.alertmanager.podDisruptionBudget.enabled }}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-alertmanager
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-alertmanager
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ {{- if .Values.alertmanager.podDisruptionBudget.minAvailable }}
+ minAvailable: {{ .Values.alertmanager.podDisruptionBudget.minAvailable }}
+ {{- end }}
+ {{- if .Values.alertmanager.podDisruptionBudget.maxUnavailable }}
+ maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }}
+ {{- end }}
+ selector:
+ matchLabels:
+ app: alertmanager
+ alertmanager: {{ template "prometheus-operator.fullname" . }}-alertmanager
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp-clusterrole.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp-clusterrole.yaml
new file mode 100644
index 00000000..e83d8bc7
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp-clusterrole.yaml
@@ -0,0 +1,15 @@
+{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-alertmanager
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-alertmanager
+{{ include "prometheus-operator.labels" . | indent 4 }}
+rules:
+- apiGroups: ['extensions']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames:
+ - {{ template "prometheus-operator.fullname" . }}-alertmanager
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp-clusterrolebinding.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp-clusterrolebinding.yaml
new file mode 100644
index 00000000..e1d06ab4
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp-clusterrolebinding.yaml
@@ -0,0 +1,17 @@
+{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-alertmanager
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-alertmanager
+{{ include "prometheus-operator.labels" . | indent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "prometheus-operator.fullname" . }}-alertmanager
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "prometheus-operator.alertmanager.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp.yaml
new file mode 100644
index 00000000..01eda240
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/psp.yaml
@@ -0,0 +1,48 @@
+{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-alertmanager
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-alertmanager
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ privileged: false
+ # Required to prevent escalations to root.
+ # allowPrivilegeEscalation: false
+ # This is redundant with non-root + disallow privilege escalation,
+ # but we can provide it for defense in depth.
+ #requiredDropCapabilities:
+ # - ALL
+ # Allow core volume types.
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ - 'persistentVolumeClaim'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ # Permits the container to run with root privileges as well.
+ rule: 'RunAsAny'
+ seLinux:
+ # This policy assumes the nodes are using AppArmor rather than SELinux.
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 0
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 0
+ max: 65535
+ readOnlyRootFilesystem: false
+{{- end }}
+
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/secret.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/secret.yaml
new file mode 100644
index 00000000..e73c465f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/secret.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.alertmanager.enabled }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: alertmanager-{{ template "prometheus-operator.fullname" . }}-alertmanager
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-alertmanager
+{{ include "prometheus-operator.labels" . | indent 4 }}
+data:
+ alertmanager.yaml: {{ toYaml .Values.alertmanager.config | b64enc | quote }}
+{{- range $key, $val := .Values.alertmanager.templateFiles }}
+ {{ $key }}: {{ $val | b64enc | quote }}
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/service.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/service.yaml
new file mode 100644
index 00000000..d10bf745
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/service.yaml
@@ -0,0 +1,42 @@
+{{- if .Values.alertmanager.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-alertmanager
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-alertmanager
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.alertmanager.service.annotations }}
+ annotations:
+{{ toYaml .Values.alertmanager.service.annotations | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.alertmanager.service.clusterIP }}
+ clusterIP: {{ .Values.alertmanager.service.clusterIP }}
+{{- end }}
+{{- if .Values.alertmanager.service.externalIPs }}
+ externalIPs:
+{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }}
+{{- end }}
+{{- if .Values.alertmanager.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.alertmanager.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges:
+ {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }}
+ - {{ $cidr }}
+ {{- end }}
+{{- end }}
+ ports:
+ - name: web
+ {{- if eq .Values.alertmanager.service.type "NodePort" }}
+ nodePort: {{ .Values.alertmanager.service.nodePort }}
+ {{- end }}
+ port: 9093
+ targetPort: 9093
+ protocol: TCP
+ selector:
+ app: alertmanager
+ alertmanager: {{ template "prometheus-operator.fullname" . }}-alertmanager
+ type: "{{ .Values.alertmanager.service.type }}"
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/serviceaccount.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/serviceaccount.yaml
new file mode 100644
index 00000000..bbed0287
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.alertmanager.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "prometheus-operator.alertmanager.serviceAccountName" . }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-alertmanager
+{{ include "prometheus-operator.labels" . | indent 4 }}
+imagePullSecrets:
+{{ toYaml .Values.global.imagePullSecrets | indent 2 }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/servicemonitor.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/servicemonitor.yaml
new file mode 100644
index 00000000..5c8cab90
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/alertmanager/servicemonitor.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.alertmanager.enabled .Values.alertmanager.serviceMonitor.selfMonitor }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: ServiceMonitor
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-alertmanager
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-alertmanager
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "prometheus-operator.name" . }}-alertmanager
+ release: {{ .Release.Name | quote }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace | quote }}
+ endpoints:
+ - port: web
+ interval: 30s
+ path: "{{ trimSuffix "/" .Values.alertmanager.alertmanagerSpec.routePrefix }}/metrics"
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/exporters/node-exporter/servicemonitor.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/exporters/node-exporter/servicemonitor.yaml
new file mode 100644
index 00000000..392b7c93
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/exporters/node-exporter/servicemonitor.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.nodeExporter.enabled }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: ServiceMonitor
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-node-exporter
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-node-exporter
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ jobLabel: {{ .Values.nodeExporter.jobLabel }}
+ selector:
+ matchLabels:
+ app: prometheus-node-exporter
+ release: {{ .Release.Name }}
+ endpoints:
+ - port: metrics
+ interval: 30s
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/configmap-dashboards.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/configmap-dashboards.yaml
new file mode 100644
index 00000000..0289154b
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/configmap-dashboards.yaml
@@ -0,0 +1,23 @@
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+{{- $files := .Files.Glob "dashboards/*.json" }}
+{{- if $files }}
+apiVersion: v1
+kind: ConfigMapList
+items:
+{{- range $path, $fileContents := $files }}
+{{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }}
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) $dashboardName | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 6 }}
+ data:
+ {{ $dashboardName }}.json: {{ $.Files.Get $path | toJson }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/configmaps-datasources.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/configmaps-datasources.yaml
new file mode 100644
index 00000000..5b8b54c4
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/configmaps-datasources.yaml
@@ -0,0 +1,19 @@
+{{- if and .Values.grafana.enabled .Values.grafana.sidecar.datasources.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-grafana-datasource
+ labels:
+ {{ $.Values.grafana.sidecar.datasources.label }}: "1"
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ datasource.yaml: |-
+ apiVersion: 1
+ datasources:
+ - name: Prometheus
+ type: prometheus
+ url: http://{{ template "prometheus-operator.fullname" . }}-prometheus:9090/{{ trimPrefix "/" .Values.prometheus.prometheusSpec.routePrefix }}
+ access: proxy
+ isDefault: true
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/etcd.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/etcd.yaml
new file mode 100644
index 00000000..161b1907
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/etcd.yaml
@@ -0,0 +1,1110 @@
+# Generated from 'etcd' from https://raw.githubusercontent.com/etcd-io/etcd/master/Documentation/op-guide/grafana.json
+# Do not change in-place! In order to change this file first read following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.kubeEtcd.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) "etcd" | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ etcd.json: |-
+ {
+ "annotations": {
+ "list": []
+ },
+ "description": "etcd sample Grafana dashboard with Prometheus",
+ "editable": true,
+ "gnetId": null,
+ "hideControls": false,
+ "id": 6,
+ "links": [],
+ "refresh": false,
+ "rows": [
+ {
+ "collapse": false,
+ "editable": true,
+ "height": "250px",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 28,
+ "interval": null,
+ "isNew": true,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "targets": [
+ {
+ "expr": "sum(etcd_server_has_leader{job=\"$cluster\"})",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "metric": "etcd_server_has_leader",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Up",
+ "type": "singlestat",
+ "valueFontSize": "200%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "id": 23,
+ "isNew": true,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "span": 5,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(grpc_server_started_total{job=\"$cluster\",grpc_type=\"unary\"}[5m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "RPC Rate",
+ "metric": "grpc_server_started_total",
+ "refId": "A",
+ "step": 2
+ },
+ {
+ "expr": "sum(rate(grpc_server_handled_total{job=\"$cluster\",grpc_type=\"unary\",grpc_code!=\"OK\"}[5m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "RPC Failed Rate",
+ "metric": "grpc_server_handled_total",
+ "refId": "B",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "RPC Rate",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ops",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "id": 41,
+ "isNew": true,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "span": 4,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(grpc_server_started_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})",
+ "intervalFactor": 2,
+ "legendFormat": "Watch Streams",
+ "metric": "grpc_server_handled_total",
+ "refId": "A",
+ "step": 4
+ },
+ {
+ "expr": "sum(grpc_server_started_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})",
+ "intervalFactor": 2,
+ "legendFormat": "Lease Streams",
+ "metric": "grpc_server_handled_total",
+ "refId": "B",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Active Streams",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "showTitle": false,
+ "title": "Row"
+ },
+ {
+ "collapse": false,
+ "editable": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "datasource": "$datasource",
+ "decimals": null,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 1,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "etcd_mvcc_db_total_size_in_bytes{job=\"$cluster\"}",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{instance}}`}} DB Size",
+ "metric": "",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "DB Size",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "span": 4,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=\"$cluster\"}[5m])) by (instance, le))",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{instance}}`}} WAL fsync",
+ "metric": "etcd_disk_wal_fsync_duration_seconds_bucket",
+ "refId": "A",
+ "step": 4
+ },
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket{job=\"$cluster\"}[5m])) by (instance, le))",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{instance}}`}} DB fsync",
+ "metric": "etcd_disk_backend_commit_duration_seconds_bucket",
+ "refId": "B",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk Sync Duration",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "id": 29,
+ "isNew": true,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "process_resident_memory_bytes{job=\"$cluster\"}",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{instance}}`}} Resident Memory",
+ "metric": "process_resident_memory_bytes",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "title": "New row"
+ },
+ {
+ "collapse": false,
+ "editable": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 5,
+ "id": 22,
+ "isNew": true,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "span": 3,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(etcd_network_client_grpc_received_bytes_total{job=\"$cluster\"}[5m])",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{instance}}`}} Client Traffic In",
+ "metric": "etcd_network_client_grpc_received_bytes_total",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Client Traffic In",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 5,
+ "id": 21,
+ "isNew": true,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "span": 3,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(etcd_network_client_grpc_sent_bytes_total{job=\"$cluster\"}[5m])",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{instance}}`}} Client Traffic Out",
+ "metric": "etcd_network_client_grpc_sent_bytes_total",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Client Traffic Out",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "id": 20,
+ "isNew": true,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "span": 3,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(etcd_network_peer_received_bytes_total{job=\"$cluster\"}[5m])) by (instance)",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{instance}}`}} Peer Traffic In",
+ "metric": "etcd_network_peer_received_bytes_total",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Peer Traffic In",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "datasource": "$datasource",
+ "decimals": null,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 16,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "span": 3,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(etcd_network_peer_sent_bytes_total{job=\"$cluster\"}[5m])) by (instance)",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{instance}}`}} Peer Traffic Out",
+ "metric": "etcd_network_peer_sent_bytes_total",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Peer Traffic Out",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "title": "New row"
+ },
+ {
+ "collapse": false,
+ "editable": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "id": 40,
+ "isNew": true,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(etcd_server_proposals_failed_total{job=\"$cluster\"}[5m]))",
+ "intervalFactor": 2,
+ "legendFormat": "Proposal Failure Rate",
+ "metric": "etcd_server_proposals_failed_total",
+ "refId": "A",
+ "step": 2
+ },
+ {
+ "expr": "sum(etcd_server_proposals_pending{job=\"$cluster\"})",
+ "intervalFactor": 2,
+ "legendFormat": "Proposal Pending Total",
+ "metric": "etcd_server_proposals_pending",
+ "refId": "B",
+ "step": 2
+ },
+ {
+ "expr": "sum(rate(etcd_server_proposals_committed_total{job=\"$cluster\"}[5m]))",
+ "intervalFactor": 2,
+ "legendFormat": "Proposal Commit Rate",
+ "metric": "etcd_server_proposals_committed_total",
+ "refId": "C",
+ "step": 2
+ },
+ {
+ "expr": "sum(rate(etcd_server_proposals_applied_total{job=\"$cluster\"}[5m]))",
+ "intervalFactor": 2,
+ "legendFormat": "Proposal Apply Rate",
+ "refId": "D",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Raft Proposals",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "datasource": "$datasource",
+ "decimals": 0,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "id": 19,
+ "isNew": true,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "changes(etcd_server_leader_changes_seen_total{job=\"$cluster\"}[1d])",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{instance}}`}} Total Leader Elections Per Day",
+ "metric": "etcd_server_leader_changes_seen_total",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total Leader Elections Per Day",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "title": "New row"
+ }
+ ],
+ "schemaVersion": 13,
+ "sharedCrosshair": false,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "prod",
+ "value": "prod"
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "cluster",
+ "multi": false,
+ "name": "cluster",
+ "options": [],
+ "query": "label_values(etcd_server_has_leader, job)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 2,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {
+ "now": true,
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "etcd",
+ "version": 215
+ }
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml
new file mode 100644
index 00000000..5792140e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml
@@ -0,0 +1,926 @@
+# Generated from 'k8s-cluster-rsrc-use' from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
+# Do not change in-place! In order to change this file first read following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) "k8s-cluster-rsrc-use" | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ k8s-cluster-rsrc-use.json: |-
+ {
+ "annotations": {
+ "list": [
+
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "links": [
+
+ ],
+ "refresh": "10s",
+ "rows": [
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 1,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:cluster_cpu_utilisation:ratio",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{node}}`}}",
+ "legendLink": "/d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Utilisation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": 1,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_cpu_saturation_load1: / scalar(sum(min(kube_pod_info) by (node)))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{node}}`}}",
+ "legendLink": "/d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Saturation (Load1)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": 1,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CPU",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:cluster_memory_utilisation:ratio",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{node}}`}}",
+ "legendLink": "/d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Utilisation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": 1,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_memory_swap_io_bytes:sum_rate",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{node}}`}}",
+ "legendLink": "/d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Saturation (Swap I/O)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Memory",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 5,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_disk_utilisation:avg_irate / scalar(:kube_pod_info_node_count:)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{node}}`}}",
+ "legendLink": "/d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk IO Utilisation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": 1,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 6,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_disk_saturation:avg_irate / scalar(:kube_pod_info_node_count:)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{node}}`}}",
+ "legendLink": "/d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk IO Saturation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": 1,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Disk",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 7,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_net_utilisation:sum_irate",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{node}}`}}",
+ "legendLink": "/d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Net Utilisation (Transmitted)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 8,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_net_saturation:sum_irate",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{node}}`}}",
+ "legendLink": "/d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Net Saturation (Dropped)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Network",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 9,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(max(node_filesystem_size_bytes{fstype=\u007e\"ext[234]|btrfs|xfs|zfs\"} - node_filesystem_avail_bytes{fstype=\u007e\"ext[234]|btrfs|xfs|zfs\"}) by (device,pod,namespace)) by (pod,namespace)\n/ scalar(sum(max(node_filesystem_size_bytes{fstype=\u007e\"ext[234]|btrfs|xfs|zfs\"}) by (device,pod,namespace)))\n* on (namespace, pod) group_left (node) node_namespace_pod:kube_pod_info:\n",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{node}}`}}",
+ "legendLink": "/d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk Capacity",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": 1,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Storage",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes-mixin"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [
+
+ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Kubernetes / USE Method / Cluster",
+ "uid": "a6e7d1362e1ddbb79db21d5bb40d7137",
+ "version": 0
+ }
+{{- end }} \ No newline at end of file
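The USE-method dashboards in this patch graph pre-aggregated recording-rule series (node:cluster_cpu_utilisation:ratio, node:node_disk_saturation:avg_irate, node:node_cpu_utilisation:avg1m, and so on) rather than raw node-exporter metrics, so they stay empty unless those rules are loaded into the same Prometheus. With the prometheus-operator such rules are normally shipped as PrometheusRule objects; a minimal sketch, assuming that delivery mechanism, where the expression is an illustrative placeholder built from the raw metric used elsewhere in this patch and not the upstream kubernetes-mixin definition:

    apiVersion: monitoring.coreos.com/v1
    kind: PrometheusRule
    metadata:
      name: node-use-rules            # hypothetical name, for illustration only
      labels:
        release: prometheus-operator  # must match the operator's ruleSelector; depends on the installation
    spec:
      groups:
      - name: node.rules
        rules:
        - record: node:node_cpu_utilisation:avg1m
          # Illustrative placeholder; the real rule set (from kubernetes-mixin) also maps instance to node.
          expr: 1 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1m]))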
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-coredns.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-coredns.yaml
new file mode 100644
index 00000000..b638913e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-coredns.yaml
@@ -0,0 +1,1323 @@
+# Added manually, can be changed in-place.
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.coreDns.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) "k8s-coredns" | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ k8s-coredns.json: |-
+ {
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "A dashboard for the CoreDNS DNS server.",
+ "editable": true,
+ "gnetId": 5926,
+ "graphTooltip": 0,
+ "id": 9,
+ "iteration": 1539947521873,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 0,
+ "y": 0
+ },
+ "id": 1,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "total",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(coredns_dns_request_count_total{instance=~\"$instance\"}[5m])) by (proto)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "sum(rate(coredns_dns_request_count_total{instance=~\"$instance\"}[5m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "total",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Requests (total)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "pps",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "pps",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 8,
+ "y": 0
+ },
+ "id": 12,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "total",
+ "yaxis": 2
+ },
+ {
+ "alias": "other",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(coredns_dns_request_type_count_total{instance=~\"$instance\"}[5m])) by (type)",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{type}}`}}",
+ "refId": "A",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Requests (by qtype)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "pps",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "pps",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 16,
+ "y": 0
+ },
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "total",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(coredns_dns_request_count_total{instance=~\"$instance\"}[5m])) by (zone)",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{zone}}`}}",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "sum(rate(coredns_dns_request_count_total{instance=~\"$instance\"}[5m]))",
+ "intervalFactor": 2,
+ "legendFormat": "total",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Requests (by zone)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "pps",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "pps",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 7
+ },
+ "id": 10,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "total",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(coredns_dns_request_do_count_total{instance=~\"$instance\"}[5m]))",
+ "intervalFactor": 2,
+ "legendFormat": "DO",
+ "refId": "A",
+ "step": 40
+ },
+ {
+ "expr": "sum(rate(coredns_dns_request_count_total{instance=~\"$instance\"}[5m]))",
+ "intervalFactor": 2,
+ "legendFormat": "total",
+ "refId": "B",
+ "step": 40
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Requests (DO bit)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "pps",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "pps",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 12,
+ "y": 7
+ },
+ "id": 9,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "tcp:90",
+ "yaxis": 2
+ },
+ {
+ "alias": "tcp:99 ",
+ "yaxis": 2
+ },
+ {
+ "alias": "tcp:50",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto))",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:99 ",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.90, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto))",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:90",
+ "refId": "B",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.50, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto))",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:50",
+ "refId": "C",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Requests (size, udp)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 18,
+ "y": 7
+ },
+ "id": 14,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "tcp:90",
+ "yaxis": 1
+ },
+ {
+ "alias": "tcp:99 ",
+ "yaxis": 1
+ },
+ {
+ "alias": "tcp:50",
+ "yaxis": 1
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le,proto))",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:99 ",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.90, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le,proto))",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:90",
+ "refId": "B",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.50, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le,proto))",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:50",
+ "refId": "C",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+          "title": "Requests (size, tcp)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 14
+ },
+ "id": 5,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(coredns_dns_response_rcode_count_total{instance=~\"$instance\"}[5m])) by (rcode)",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{rcode}}`}}",
+ "refId": "A",
+ "step": 40
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Responses (by rcode)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "pps",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 14
+ },
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(coredns_dns_request_duration_seconds_bucket{instance=~\"$instance\"}[5m])) by (le, job))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "99%",
+ "refId": "A",
+ "step": 40
+ },
+ {
+ "expr": "histogram_quantile(0.90, sum(rate(coredns_dns_request_duration_seconds_bucket{instance=~\"$instance\"}[5m])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "90%",
+ "refId": "B",
+ "step": 40
+ },
+ {
+ "expr": "histogram_quantile(0.50, sum(rate(coredns_dns_request_duration_seconds_bucket{instance=~\"$instance\"}[5m])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "50%",
+ "refId": "C",
+ "step": 40
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Responses (duration)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 21
+ },
+ "id": 8,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "udp:50%",
+ "yaxis": 1
+ },
+ {
+ "alias": "tcp:50%",
+ "yaxis": 2
+ },
+ {
+ "alias": "tcp:90%",
+ "yaxis": 2
+ },
+ {
+ "alias": "tcp:99%",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto)) ",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:99%",
+ "refId": "A",
+ "step": 40
+ },
+ {
+              "expr": "histogram_quantile(0.90, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto)) ",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:90%",
+ "refId": "B",
+ "step": 40
+ },
+ {
+ "expr": "histogram_quantile(0.50, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto)) ",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:50%",
+ "metric": "",
+ "refId": "C",
+ "step": 40
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Responses (size, udp)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 21
+ },
+ "id": 13,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "udp:50%",
+ "yaxis": 1
+ },
+ {
+ "alias": "tcp:50%",
+ "yaxis": 1
+ },
+ {
+ "alias": "tcp:90%",
+ "yaxis": 1
+ },
+ {
+ "alias": "tcp:99%",
+ "yaxis": 1
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le,proto)) ",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:99%",
+ "refId": "A",
+ "step": 40
+ },
+ {
+ "expr": "histogram_quantile(0.90, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le,proto)) ",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:90%",
+ "refId": "B",
+ "step": 40
+ },
+ {
+ "expr": "histogram_quantile(0.50, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le, proto)) ",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{proto}}`}}:50%",
+ "metric": "",
+ "refId": "C",
+ "step": 40
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Responses (size, tcp)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 28
+ },
+ "id": 15,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(coredns_cache_size{instance=~\"$instance\"}) by (type)",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{type}}`}}",
+ "refId": "A",
+ "step": 40
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Cache (size)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 28
+ },
+ "id": 16,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "misses",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(coredns_cache_hits_total{instance=~\"$instance\"}[5m])) by (type)",
+ "intervalFactor": 2,
+ "legendFormat": "hits:{{`{{type}}`}}",
+ "refId": "A",
+ "step": 40
+ },
+ {
+ "expr": "sum(rate(coredns_cache_misses_total{instance=~\"$instance\"}[5m])) by (type)",
+ "intervalFactor": 2,
+ "legendFormat": "misses",
+ "refId": "B",
+ "step": 40
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Cache (hitrate)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "pps",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "pps",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "schemaVersion": 16,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "allValue": ".*",
+ "current": {
+ "selected": true,
+ "tags": [],
+ "text": "172.16.1.8:9153",
+ "value": "172.16.1.8:9153"
+ },
+ "datasource": "Prometheus",
+ "hide": 0,
+ "includeAll": true,
+ "label": "Instance",
+ "multi": false,
+ "name": "instance",
+ "options": [],
+ "query": "up{job=\"coredns\"}",
+ "refresh": 1,
+ "regex": ".*instance=\"(.*?)\".*",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-3h",
+ "to": "now"
+ },
+ "timepicker": {
+ "now": true,
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "utc",
+ "title": "CoreDNS",
+ "uid": "vkQ0UHxik",
+ "version": 1
+ }
+{{- end }} \ No newline at end of file
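The CoreDNS dashboard above assumes CoreDNS metrics are scraped under job="coredns" on the 9153 metrics port (its instance variable is populated from up{job="coredns"}). With the prometheus-operator that scrape is normally declared through a ServiceMonitor; a rough sketch under assumed names and labels follows, and since the chart provides its own equivalent when coreDns.enabled is set, treat this as illustration rather than the chart's actual object:

    apiVersion: monitoring.coreos.com/v1
    kind: ServiceMonitor
    metadata:
      name: coredns                    # hypothetical name
      labels:
        release: prometheus-operator   # must match the Prometheus serviceMonitorSelector; depends on the installation
    spec:
      selector:
        matchLabels:
          k8s-app: kube-dns            # assumed label of the CoreDNS service in kube-system
      namespaceSelector:
        matchNames:
        - kube-system
      endpoints:
      - port: metrics                  # CoreDNS exposes Prometheus metrics on 9153
        interval: 30s
    # The resulting job label has to come out as "coredns" for the dashboard's queries to match;
    # the chart handles this through its coreDns.* values.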
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-node-rsrc-use.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-node-rsrc-use.yaml
new file mode 100644
index 00000000..19394efe
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-node-rsrc-use.yaml
@@ -0,0 +1,953 @@
+# Generated from 'k8s-node-rsrc-use' from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) "k8s-node-rsrc-use" | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ k8s-node-rsrc-use.json: |-
+ {
+ "annotations": {
+ "list": [
+
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "links": [
+
+ ],
+ "refresh": "10s",
+ "rows": [
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 1,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_cpu_utilisation:avg1m{node=\"$node\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Utilisation",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Utilisation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_cpu_saturation_load1:{node=\"$node\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Saturation",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Saturation (Load1)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CPU",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_memory_utilisation:{node=\"$node\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Memory",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Utilisation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_memory_swap_io_bytes:sum_rate{node=\"$node\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Swap IO",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Saturation (Swap I/O)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Memory",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 5,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_disk_utilisation:avg_irate{node=\"$node\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Utilisation",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk IO Utilisation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 6,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_disk_saturation:avg_irate{node=\"$node\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Saturation",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk IO Saturation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Disk",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 7,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_net_utilisation:sum_irate{node=\"$node\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Utilisation",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Net Utilisation (Transmitted)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 8,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_net_saturation:sum_irate{node=\"$node\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Saturation",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Net Saturation (Dropped)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Net",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 9,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_filesystem_usage:\n* on (namespace, pod) group_left (node) node_namespace_pod:kube_pod_info:{node=\"$node\"}\n",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{device}}`}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk Utilisation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Disk",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes-mixin"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [
+
+ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "prod",
+ "value": "prod"
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "node",
+ "multi": false,
+ "name": "node",
+ "options": [
+
+ ],
+ "query": "label_values(kube_node_info, node)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 2,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Kubernetes / USE Method / Node",
+ "uid": "4ac4f123aae0ff6dbaf4f4f66120033b",
+ "version": 0
+ }
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-cluster.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-cluster.yaml
new file mode 100644
index 00000000..2f61ac42
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-cluster.yaml
@@ -0,0 +1,1338 @@
+# Generated from 'k8s-resources-cluster' from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) "k8s-resources-cluster" | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ k8s-resources-cluster.json: |-
+ {
+ "annotations": {
+ "list": [
+
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "links": [
+
+ ],
+ "refresh": "10s",
+ "rows": [
+ {
+ "collapse": false,
+ "height": "100px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "format": "percentunit",
+ "id": 1,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 2,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "1 - avg(rate(node_cpu_seconds_total{mode=\"idle\"}[1m]))",
+ "format": "time_series",
+ "instant": true,
+ "intervalFactor": 2,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "70,80",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Utilisation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "singlestat",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "format": "percentunit",
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 2,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(kube_pod_container_resource_requests_cpu_cores) / sum(node:node_num_cpu:sum)",
+ "format": "time_series",
+ "instant": true,
+ "intervalFactor": 2,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "70,80",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Requests Commitment",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "singlestat",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "format": "percentunit",
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 2,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(kube_pod_container_resource_limits_cpu_cores) / sum(node:node_num_cpu:sum)",
+ "format": "time_series",
+ "instant": true,
+ "intervalFactor": 2,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "70,80",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Limits Commitment",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "singlestat",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "format": "percentunit",
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 2,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "1 - sum(:node_memory_MemFreeCachedBuffers_bytes:sum) / sum(:node_memory_MemTotal_bytes:sum)",
+ "format": "time_series",
+ "instant": true,
+ "intervalFactor": 2,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "70,80",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Utilisation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "singlestat",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "format": "percentunit",
+ "id": 5,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 2,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(kube_pod_container_resource_requests_memory_bytes) / sum(:node_memory_MemTotal_bytes:sum)",
+ "format": "time_series",
+ "instant": true,
+ "intervalFactor": 2,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "70,80",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Requests Commitment",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "singlestat",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "format": "percentunit",
+ "id": 6,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 2,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(kube_pod_container_resource_limits_memory_bytes) / sum(:node_memory_MemTotal_bytes:sum)",
+ "format": "time_series",
+ "instant": true,
+ "intervalFactor": 2,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "70,80",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Limits Commitment",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "singlestat",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Headlines",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 7,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate) by (namespace)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{namespace}}`}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CPU",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 8,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "styles": [
+ {
+ "alias": "Time",
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "pattern": "Time",
+ "type": "hidden"
+ },
+ {
+ "alias": "CPU Usage",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #A",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "CPU Requests",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #B",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "CPU Requests %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #C",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "CPU Limits",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #D",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "CPU Limits %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #E",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "Namespace",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": true,
+ "linkTooltip": "Drill down",
+ "linkUrl": "/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-namespace=$__cell",
+ "pattern": "namespace",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "pattern": "/.*/",
+ "thresholds": [
+
+ ],
+ "type": "string",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate) by (namespace)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_requests_cpu_cores) by (namespace)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate) by (namespace) / sum(kube_pod_container_resource_requests_cpu_cores) by (namespace)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "C",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_limits_cpu_cores) by (namespace)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "D",
+ "step": 10
+ },
+ {
+ "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate) by (namespace) / sum(kube_pod_container_resource_limits_cpu_cores) by (namespace)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "E",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Quota",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transform": "table",
+ "type": "table",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CPU Quota",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 9,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(container_memory_rss{container_name!=\"\"}) by (namespace)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{namespace}}`}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Usage (w/o cache)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Memory",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 10,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "styles": [
+ {
+ "alias": "Time",
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "pattern": "Time",
+ "type": "hidden"
+ },
+ {
+ "alias": "Memory Usage",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #A",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "decbytes"
+ },
+ {
+ "alias": "Memory Requests",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #B",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "decbytes"
+ },
+ {
+ "alias": "Memory Requests %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #C",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "Memory Limits",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #D",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "decbytes"
+ },
+ {
+ "alias": "Memory Limits %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #E",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "Namespace",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": true,
+ "linkTooltip": "Drill down",
+ "linkUrl": "/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-namespace=$__cell",
+ "pattern": "namespace",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "pattern": "/.*/",
+ "thresholds": [
+
+ ],
+ "type": "string",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "sum(container_memory_rss{container_name!=\"\"}) by (namespace)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_requests_memory_bytes) by (namespace)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum(container_memory_rss{container_name!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests_memory_bytes) by (namespace)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "C",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_limits_memory_bytes) by (namespace)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "D",
+ "step": 10
+ },
+ {
+ "expr": "sum(container_memory_rss{container_name!=\"\"}) by (namespace) / sum(kube_pod_container_resource_limits_memory_bytes) by (namespace)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "E",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Requests by Namespace",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transform": "table",
+ "type": "table",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Memory Requests",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes-mixin"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [
+
+ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Kubernetes / Compute Resources / Cluster",
+ "uid": "efa86fd1d0c121a26444b636a3f509a8",
+ "version": 0
+ }
+{{- end }} \ No newline at end of file
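Note: the dashboard ConfigMap above is rendered only when both grafana.enabled and grafana.defaultDashboardsEnabled are set, and Grafana's dashboard sidecar discovers it through the label key configured at grafana.sidecar.dashboards.label (all three keys appear in the template guards above). A minimal values sketch under those assumptions follows; the "grafana_dashboard" label key is an assumed chart default, not something this diff sets:

# Hypothetical values.yaml excerpt (key names taken from the template guards above;
# the label key "grafana_dashboard" is an assumed default, not set in this diff).
grafana:
  enabled: true
  defaultDashboardsEnabled: true
  sidecar:
    dashboards:
      label: grafana_dashboard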
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-namespace.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-namespace.yaml
new file mode 100644
index 00000000..fae35be8
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-namespace.yaml
@@ -0,0 +1,849 @@
+# Generated from 'k8s-resources-namespace' from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
+# Do not change in-place! To change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) "k8s-resources-namespace" | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ k8s-resources-namespace.json: |-
+ {
+ "annotations": {
+ "list": [
+
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "links": [
+
+ ],
+ "refresh": "10s",
+ "rows": [
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 1,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\"}) by (pod_name)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{pod_name}}`}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CPU Usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "styles": [
+ {
+ "alias": "Time",
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "pattern": "Time",
+ "type": "hidden"
+ },
+ {
+ "alias": "CPU Usage",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #A",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "CPU Requests",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #B",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "CPU Requests %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #C",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "CPU Limits",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #D",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "CPU Limits %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #E",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "Pod",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": true,
+ "linkTooltip": "Drill down",
+ "linkUrl": "/d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-namespace=$namespace&var-pod=$__cell",
+ "pattern": "pod",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "pattern": "/.*/",
+ "thresholds": [
+
+ ],
+ "type": "string",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_requests_cpu_cores{namespace=\"$namespace\"}) by (pod)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_requests_cpu_cores{namespace=\"$namespace\"}) by (pod)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "C",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\"}) by (pod)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "D",
+ "step": 10
+ },
+ {
+ "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\"}) by (pod)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "E",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Quota",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transform": "table",
+ "type": "table",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CPU Quota",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(container_memory_usage_bytes{namespace=\"$namespace\", container_name!=\"\"}) by (pod_name)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{pod_name}}`}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Memory Usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "styles": [
+ {
+ "alias": "Time",
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "pattern": "Time",
+ "type": "hidden"
+ },
+ {
+ "alias": "Memory Usage",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #A",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "decbytes"
+ },
+ {
+ "alias": "Memory Requests",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #B",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "decbytes"
+ },
+ {
+ "alias": "Memory Requests %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #C",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "Memory Limits",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #D",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "decbytes"
+ },
+ {
+ "alias": "Memory Limits %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #E",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "Pod",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": true,
+ "linkTooltip": "Drill down",
+ "linkUrl": "/d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-namespace=$namespace&var-pod=$__cell",
+ "pattern": "pod",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "pattern": "/.*/",
+ "thresholds": [
+
+ ],
+ "type": "string",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "sum(label_replace(container_memory_usage_bytes{namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\"}) by (pod)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum(label_replace(container_memory_usage_bytes{namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\"}) by (pod)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "C",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\"}) by (pod)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "D",
+ "step": 10
+ },
+ {
+ "expr": "sum(label_replace(container_memory_usage_bytes{namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\"}) by (pod)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "E",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Quota",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transform": "table",
+ "type": "table",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Memory Quota",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes-mixin"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [
+
+ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "prod",
+ "value": "prod"
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "namespace",
+ "multi": false,
+ "name": "namespace",
+ "options": [
+
+ ],
+ "query": "label_values(kube_pod_info, namespace)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 2,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Kubernetes / Compute Resources / Namespace",
+ "uid": "85a562078cdf77779eaa1add43ccec1e",
+ "version": 0
+ }
+{{- end }} \ No newline at end of file
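For reference, the metadata block of each of these templates resolves to a ConfigMap named <fullname>-<dashboard-name>, truncated to 63 characters, carrying the sidecar label plus the chart's common labels. A rough sketch of the rendered result, assuming a fullname of "prometheus-operator" and the assumed default label key "grafana_dashboard" (labels emitted by the prometheus-operator.labels helper are omitted):

# Approximate rendered metadata (illustrative only; assumes fullname "prometheus-operator"
# and omits the chart's common labels).
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-operator-k8s-resources-namespace
  labels:
    grafana_dashboard: "1"
    app: prometheus-operator-grafana
data:
  k8s-resources-namespace.json: |-
    { ... dashboard JSON as in the diff above ... }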
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-pod.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-pod.yaml
new file mode 100644
index 00000000..1678a051
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/k8s-resources-pod.yaml
@@ -0,0 +1,876 @@
+# Generated from 'k8s-resources-pod' from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
+# Do not change in-place! To change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) "k8s-resources-pod" | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ k8s-resources-pod.json: |-
+ {
+ "annotations": {
+ "list": [
+
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "links": [
+
+ ],
+ "refresh": "10s",
+ "rows": [
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 1,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\"}) by (container_name)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{container_name}}`}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CPU Usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "styles": [
+ {
+ "alias": "Time",
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "pattern": "Time",
+ "type": "hidden"
+ },
+ {
+ "alias": "CPU Usage",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #A",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "CPU Requests",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #B",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "CPU Requests %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #C",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "CPU Limits",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #D",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "CPU Limits %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #E",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "Container",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "container",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "pattern": "/.*/",
+ "thresholds": [
+
+ ],
+ "type": "string",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_requests_cpu_cores{namespace=\"$namespace\", pod=\"$pod\"}) by (container)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", pod_name=\"$pod\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_requests_cpu_cores{namespace=\"$namespace\", pod=\"$pod\"}) by (container)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "C",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", pod=\"$pod\"}) by (container)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "D",
+ "step": 10
+ },
+ {
+ "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", pod_name=\"$pod\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", pod=\"$pod\"}) by (container)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "E",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Quota",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transform": "table",
+ "type": "table",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CPU Quota",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(container_memory_usage_bytes{namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", container_name!=\"\"}) by (container_name)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{container_name}}`}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Memory Usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "styles": [
+ {
+ "alias": "Time",
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "pattern": "Time",
+ "type": "hidden"
+ },
+ {
+ "alias": "Memory Usage",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #A",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "decbytes"
+ },
+ {
+ "alias": "Memory Requests",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #B",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "decbytes"
+ },
+ {
+ "alias": "Memory Requests %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #C",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "Memory Limits",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #D",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "decbytes"
+ },
+ {
+ "alias": "Memory Limits %",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "Value #E",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "percentunit"
+ },
+ {
+ "alias": "Container",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "link": false,
+ "linkTooltip": "Drill down",
+ "linkUrl": "",
+ "pattern": "container",
+ "thresholds": [
+
+ ],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "pattern": "/.*/",
+ "thresholds": [
+
+ ],
+ "type": "string",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "sum(label_replace(container_memory_usage_bytes{namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", container_name!=\"\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\", pod=\"$pod\"}) by (container)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum(label_replace(container_memory_usage_bytes{namespace=\"$namespace\", pod_name=\"$pod\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\", pod=\"$pod\"}) by (container)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "C",
+ "step": 10
+ },
+ {
+ "expr": "sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}) by (container)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "D",
+ "step": 10
+ },
+ {
+ "expr": "sum(label_replace(container_memory_usage_bytes{namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", pod=\"$pod\"}) by (container)",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "E",
+ "step": 10
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Quota",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transform": "table",
+ "type": "table",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Memory Quota",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes-mixin"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [
+
+ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "prod",
+ "value": "prod"
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "namespace",
+ "multi": false,
+ "name": "namespace",
+ "options": [
+
+ ],
+ "query": "label_values(kube_pod_info, namespace)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 2,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "prod",
+ "value": "prod"
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "pod",
+ "multi": false,
+ "name": "pod",
+ "options": [
+
+ ],
+ "query": "label_values(kube_pod_info{namespace=\"$namespace\"}, pod)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 2,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Kubernetes / Compute Resources / Pod",
+ "uid": "6581e46e4e5c7ba40a07646395ef7b23",
+ "version": 0
+ }
+{{- end }} \ No newline at end of file
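Several of the panel expressions above query Prometheus recording rules rather than raw metrics (for example node:node_num_cpu:sum, :node_memory_MemTotal_bytes:sum and namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate), so these dashboards only show data when the chart's rule templates are deployed alongside them. A shape-only sketch of where such a rule is declared; the expression below is deliberately simplified for illustration and is not the rule shipped with the chart:

# Illustrative PrometheusRule skeleton only; the real definitions are generated
# elsewhere in this chart and use more involved expressions.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-node-recording-rules   # hypothetical name
spec:
  groups:
  - name: node.rules
    rules:
    - record: node:node_num_cpu:sum
      expr: count by (node) (node_cpu_seconds_total{job="node-exporter", mode="idle"})   # simplified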
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/nodes.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/nodes.yaml
new file mode 100644
index 00000000..ebc112c0
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/nodes.yaml
@@ -0,0 +1,1328 @@
+# Generated from 'nodes' from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
+# Do not change in-place! To change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) "nodes" | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ nodes.json: |-
+ {
+ "__inputs": [
+
+ ],
+ "__requires": [
+
+ ],
+ "annotations": {
+ "list": [
+
+ ]
+ },
+ "editable": false,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [
+
+ ],
+ "refresh": "",
+ "rows": [
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 2,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "max(node_load1{job=\"node-exporter\", instance=\"$instance\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "load 1m",
+ "refId": "A"
+ },
+ {
+ "expr": "max(node_load5{job=\"node-exporter\", instance=\"$instance\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "load 5m",
+ "refId": "B"
+ },
+ {
+ "expr": "max(node_load15{job=\"node-exporter\", instance=\"$instance\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "load 15m",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "System load",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 3,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (cpu) (irate(node_cpu_seconds_total{job=\"node-exporter\", mode!=\"idle\", instance=\"$instance\"}[5m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{cpu}}`}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Usage Per Core",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 4,
+ "legend": {
+              "alignAsTable": true,
+              "avg": true,
+              "current": true,
+              "max": false,
+              "min": false,
+              "rightSide": true,
+              "show": true,
+              "total": false,
+              "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 9,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "max (sum by (cpu) (irate(node_cpu_seconds_total{job=\"node-exporter\", mode!=\"idle\", instance=\"$instance\"}[2m])) ) * 100\n",
+ "format": "time_series",
+ "intervalFactor": 10,
+ "legendFormat": "{{`{{ cpu }}`}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+            "title": "CPU Utilization",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": 100,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": 100,
+ "min": 0,
+ "show": true
+ }
+ ]
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "$datasource",
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+
+ },
+ "id": 5,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "avg(sum by (cpu) (irate(node_cpu_seconds_total{job=\"node-exporter\", mode!=\"idle\", instance=\"$instance\"}[2m]))) * 100\n",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "80, 90",
+ "title": "CPU Usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 6,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 9,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "max(\n node_memory_MemTotal_bytes{job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_MemFree_bytes{job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_Buffers_bytes{job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_Cached_bytes{job=\"node-exporter\", instance=\"$instance\"}\n)\n",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "memory used",
+ "refId": "A"
+ },
+ {
+ "expr": "max(node_memory_Buffers_bytes{job=\"node-exporter\", instance=\"$instance\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "memory buffers",
+ "refId": "B"
+ },
+ {
+ "expr": "max(node_memory_Cached_bytes{job=\"node-exporter\", instance=\"$instance\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "memory cached",
+ "refId": "C"
+ },
+ {
+ "expr": "max(node_memory_MemFree_bytes{job=\"node-exporter\", instance=\"$instance\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "memory free",
+ "refId": "D"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "$datasource",
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+
+ },
+ "id": 7,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "max(\n (\n (\n node_memory_MemTotal_bytes{job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_MemFree_bytes{job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_Buffers_bytes{job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_Cached_bytes{job=\"node-exporter\", instance=\"$instance\"}\n )\n / node_memory_MemTotal_bytes{job=\"node-exporter\", instance=\"$instance\"}\n ) * 100)\n",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "80, 90",
+ "title": "Memory Usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 8,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+ {
+ "alias": "read",
+ "yaxis": 1
+ },
+ {
+ "alias": "io time",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "max(rate(node_disk_read_bytes_total{job=\"node-exporter\", instance=\"$instance\"}[2m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "read",
+ "refId": "A"
+ },
+ {
+ "expr": "max(rate(node_disk_written_bytes_total{job=\"node-exporter\", instance=\"$instance\"}[2m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "written",
+ "refId": "B"
+ },
+ {
+ "expr": "max(rate(node_disk_io_time_seconds_total{job=\"node-exporter\", instance=\"$instance\"}[2m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "io time",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk I/O",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "ms",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 9,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "node:node_filesystem_usage:\n",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{device}}`}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk Space Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 10,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "max(rate(node_network_receive_bytes_total{job=\"node-exporter\", instance=\"$instance\", device!\u007e\"lo\"}[5m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{device}}`}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network Received",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 11,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "max(rate(node_network_transmit_bytes_total{job=\"node-exporter\", instance=\"$instance\", device!\u007e\"lo\"}[5m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{device}}`}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network Transmitted",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 12,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 9,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "max(\n node_filesystem_files{job=\"node-exporter\", instance=\"$instance\"}\n - node_filesystem_files_free{job=\"node-exporter\", instance=\"$instance\"}\n)\n",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "inodes used",
+ "refId": "A"
+ },
+ {
+ "expr": "max(node_filesystem_files_free{job=\"node-exporter\", instance=\"$instance\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "inodes free",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Inodes Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "$datasource",
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+
+ },
+ "id": 13,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "max(\n (\n (\n node_filesystem_files{job=\"node-exporter\", instance=\"$instance\"}\n - node_filesystem_files_free{job=\"node-exporter\", instance=\"$instance\"}\n )\n / node_filesystem_files{job=\"node-exporter\", instance=\"$instance\"}\n ) * 100)\n",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "80, 90",
+ "title": "Inodes Usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes-mixin"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [
+
+ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "instance",
+ "options": [
+
+ ],
+ "query": "label_values(node_boot_time_seconds{job=\"node-exporter\"}, instance)",
+ "refresh": 2,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Kubernetes / Nodes",
+ "uid": "fa49a4706d07a042595b664c87fb33ea",
+ "version": 0
+ }
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/persistentvolumesusage.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/persistentvolumesusage.yaml
new file mode 100644
index 00000000..fe32a013
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/persistentvolumesusage.yaml
@@ -0,0 +1,359 @@
+# Generated from 'persistentvolumesusage' from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) "persistentvolumesusage" | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ persistentvolumesusage.json: |-
+ {
+ "__inputs": [
+
+ ],
+ "__requires": [
+
+ ],
+ "annotations": {
+ "list": [
+
+ ]
+ },
+ "editable": false,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [
+
+ ],
+ "refresh": "",
+ "rows": [
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 2,
+ "legend": {
+ "alignAsTable": false,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "(kubelet_volume_stats_capacity_bytes{job=\"kubelet\", persistentvolumeclaim=\"$volume\"} - kubelet_volume_stats_available_bytes{job=\"kubelet\", persistentvolumeclaim=\"$volume\"}) / kubelet_volume_stats_capacity_bytes{job=\"kubelet\", persistentvolumeclaim=\"$volume\"} * 100\n",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{`{{ Usage }}`}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Volume Space Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": 100,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": 100,
+ "min": 0,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 3,
+ "legend": {
+ "alignAsTable": false,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "kubelet_volume_stats_inodes_used{job=\"kubelet\", persistentvolumeclaim=\"$volume\"} / kubelet_volume_stats_inodes{job=\"kubelet\", persistentvolumeclaim=\"$volume\"} * 100\n",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{`{{ Usage }}`}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Volume inodes Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": 100,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": 100,
+ "min": 0,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes-mixin"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [
+
+ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "Namespace",
+ "multi": false,
+ "name": "namespace",
+ "options": [
+
+ ],
+ "query": "label_values(kubelet_volume_stats_capacity_bytes{job=\"kubelet\"}, exported_namespace)",
+ "refresh": 2,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "PersistentVolumeClaim",
+ "multi": false,
+ "name": "volume",
+ "options": [
+
+ ],
+ "query": "label_values(kubelet_volume_stats_capacity_bytes{job=\"kubelet\", exported_namespace=\"$namespace\"}, persistentvolumeclaim)",
+ "refresh": 2,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-7d",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Kubernetes / Persistent Volumes",
+ "uid": "919b92a8e8041bd567af9edab12c840c",
+ "version": 0
+ }
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/pods.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/pods.yaml
new file mode 100644
index 00000000..f2bc6c40
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/pods.yaml
@@ -0,0 +1,500 @@
+# Generated from 'pods' from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) "pods" | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ pods.json: |-
+ {
+ "__inputs": [
+
+ ],
+ "__requires": [
+
+ ],
+ "annotations": {
+ "list": [
+
+ ]
+ },
+ "editable": false,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [
+
+ ],
+ "refresh": "",
+ "rows": [
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 2,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by(container_name) (container_memory_usage_bytes{job=\"kubelet\", namespace=\"$namespace\", pod_name=\"$pod\", container_name=\u007e\"$container\", container_name!=\"POD\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Current: {{`{{ container_name }}`}}",
+ "refId": "A"
+ },
+ {
+ "expr": "sum by(container) (kube_pod_container_resource_requests_memory_bytes{job=\"kube-state-metrics\", namespace=\"$namespace\", pod=\"$pod\", container=\u007e\"$container\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Requested: {{`{{ container }}`}}",
+ "refId": "B"
+ },
+ {
+ "expr": "sum by(container) (kube_pod_container_resource_limits_memory_bytes{job=\"kube-state-metrics\", namespace=\"$namespace\", pod=\"$pod\", container=\u007e\"$container\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Limit: {{`{{ container }}`}}",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Memory Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 3,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (container_name) (rate(container_cpu_usage_seconds_total{job=\"kubelet\", namespace=\"$namespace\", image!=\"\",container_name!=\"POD\",pod_name=\"$pod\"}[1m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{ container_name }}`}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 4,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (pod_name) (rate(container_network_receive_bytes_total{job=\"kubelet\", namespace=\"$namespace\", pod_name=\"$pod\"}[1m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{ pod_name }}`}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network I/O",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes-mixin"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [
+
+ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "Namespace",
+ "multi": false,
+ "name": "namespace",
+ "options": [
+
+ ],
+ "query": "label_values(kube_pod_info, namespace)",
+ "refresh": 2,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "Pod",
+ "multi": false,
+ "name": "pod",
+ "options": [
+
+ ],
+ "query": "label_values(kube_pod_info{namespace=\u007e\"$namespace\"}, pod)",
+ "refresh": 2,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": true,
+ "label": "Container",
+ "multi": false,
+ "name": "container",
+ "options": [
+
+ ],
+ "query": "label_values(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\"}, container)",
+ "refresh": 2,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Kubernetes / Pods",
+ "uid": "ab4f13a9892a76a4d21ce8c2445bf4ea",
+ "version": 0
+ }
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/statefulset.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/statefulset.yaml
new file mode 100644
index 00000000..6195833d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/dashboards/statefulset.yaml
@@ -0,0 +1,873 @@
+# Generated from 'statefulset' from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" $) "statefulset" | trunc 63 | trimSuffix "-" }}
+ labels:
+ {{- if $.Values.grafana.sidecar.dashboards.label }}
+ {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
+ {{- end }}
+ app: {{ template "prometheus-operator.name" $ }}-grafana
+{{ include "prometheus-operator.labels" $ | indent 4 }}
+data:
+ statefulset.json: |-
+ {
+ "__inputs": [
+
+ ],
+ "__requires": [
+
+ ],
+ "annotations": {
+ "list": [
+
+ ]
+ },
+ "editable": false,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [
+
+ ],
+ "refresh": "",
+ "rows": [
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "$datasource",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+
+ },
+ "id": 2,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "cores",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rate(container_cpu_usage_seconds_total{job=\"kubelet\", namespace=\"$namespace\", pod_name=\u007e\"$statefulset.*\"}[3m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "CPU",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "0",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "$datasource",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+
+ },
+ "id": 3,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "GB",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(container_memory_usage_bytes{job=\"kubelet\", namespace=\"$namespace\", pod_name=\u007e\"$statefulset.*\"}) / 1024^3",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "Memory",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "0",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "$datasource",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+
+ },
+ "id": 4,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "Bps",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum(rate(container_network_transmit_bytes_total{job=\"kubelet\", namespace=\"$namespace\", pod_name=\u007e\"$statefulset.*\"}[3m])) + sum(rate(container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=\u007e\"$statefulset.*\"}[3m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "Network",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "0",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "height": "100px",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "$datasource",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+
+ },
+ "id": 5,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "max(kube_statefulset_replicas{job=\"kube-state-metrics\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "Desired Replicas",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "0",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "$datasource",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+
+ },
+ "id": 6,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "min(kube_statefulset_status_replicas_current{job=\"kube-state-metrics\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "Replicas of current version",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "0",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "$datasource",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+
+ },
+ "id": 7,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "max(kube_statefulset_status_observed_generation{job=\"kube-state-metrics\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "Observed Generation",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "0",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "$datasource",
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+
+ },
+ "id": 8,
+ "interval": null,
+ "links": [
+
+ ],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "max(kube_statefulset_metadata_generation{job=\"kube-state-metrics\", statefulset=\"$statefulset\", namespace=\"$namespace\"}) without (instance, pod)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": "",
+ "title": "Metadata Generation",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "0",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "panels": [
+ {
+ "aliasColors": {
+
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "gridPos": {
+
+ },
+ "id": 9,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [
+
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "max(kube_statefulset_replicas{job=\"kube-state-metrics\", statefulset=\"$statefulset\",namespace=\"$namespace\"}) without (instance, pod)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "replicas specified",
+ "refId": "A"
+ },
+ {
+ "expr": "max(kube_statefulset_status_replicas{job=\"kube-state-metrics\", statefulset=\"$statefulset\",namespace=\"$namespace\"}) without (instance, pod)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "replicas created",
+ "refId": "B"
+ },
+ {
+ "expr": "min(kube_statefulset_status_replicas_ready{job=\"kube-state-metrics\", statefulset=\"$statefulset\",namespace=\"$namespace\"}) without (instance, pod)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "ready",
+ "refId": "C"
+ },
+ {
+ "expr": "min(kube_statefulset_status_replicas_current{job=\"kube-state-metrics\", statefulset=\"$statefulset\",namespace=\"$namespace\"}) without (instance, pod)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "replicas of current version",
+ "refId": "D"
+ },
+ {
+ "expr": "min(kube_statefulset_status_replicas_updated{job=\"kube-state-metrics\", statefulset=\"$statefulset\",namespace=\"$namespace\"}) without (instance, pod)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "updated",
+ "refId": "E"
+ }
+ ],
+ "thresholds": [
+
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Replicas",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [
+
+ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6",
+ "type": "row"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes-mixin"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [
+
+ ],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "Namespace",
+ "multi": false,
+ "name": "namespace",
+ "options": [
+
+ ],
+ "query": "label_values(kube_statefulset_metadata_generation{job=\"kube-state-metrics\"}, namespace)",
+ "refresh": 2,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+
+ },
+ "datasource": "$datasource",
+ "hide": 0,
+ "includeAll": false,
+ "label": "Name",
+ "multi": false,
+ "name": "statefulset",
+ "options": [
+
+ ],
+ "query": "label_values(kube_statefulset_metadata_generation{job=\"kube-state-metrics\", namespace=\"$namespace\"}, statefulset)",
+ "refresh": 2,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [
+
+ ],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Kubernetes / StatefulSets",
+ "uid": "a31c1f46e6f727cb37c0d731a7245005",
+ "version": 0
+ }
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/servicemonitor.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/servicemonitor.yaml
new file mode 100644
index 00000000..954a842c
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/grafana/servicemonitor.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.grafana.enabled .Values.grafana.serviceMonitor.selfMonitor }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: ServiceMonitor
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-grafana
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-grafana
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ selector:
+ matchLabels:
+ app: grafana
+ release: {{ .Release.Name | quote }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace | quote }}
+ endpoints:
+ - port: service
+ interval: 30s
+ path: "{{ trimSuffix "/" .Values.grafana.ingress.path }}/metrics"
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/cleanup-crds.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/cleanup-crds.yaml
new file mode 100644
index 00000000..297e8391
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/cleanup-crds.yaml
@@ -0,0 +1,43 @@
+{{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.cleanupCustomResource }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-operator-cleanup
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ "helm.sh/hook": pre-delete
+ "helm.sh/hook-weight": "3"
+ "helm.sh/hook-delete-policy": hook-succeeded
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ template:
+ metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-operator-cleanup
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 8 }}
+ spec:
+ {{- if .Values.global.rbac.create }}
+ serviceAccountName: {{ template "prometheus-operator.operator.serviceAccountName" . }}
+ {{- end }}
+ containers:
+ - name: kubectl
+ image: "{{ .Values.prometheusOperator.hyperkubeImage.repository }}:{{ .Values.prometheusOperator.hyperkubeImage.tag }}"
+ imagePullPolicy: "{{ .Values.prometheusOperator.hyperkubeImage.pullPolicy }}"
+ command:
+ - /bin/sh
+ - -c
+ - >
+ kubectl delete alertmanager --all;
+ kubectl delete prometheus --all;
+ kubectl delete prometheusrule --all;
+ kubectl delete servicemonitor --all;
+ sleep 10;
+ kubectl delete crd alertmanagers.monitoring.coreos.com;
+ kubectl delete crd prometheuses.monitoring.coreos.com;
+ kubectl delete crd prometheusrules.monitoring.coreos.com;
+ kubectl delete crd servicemonitors.monitoring.coreos.com;
+ restartPolicy: OnFailure
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/clusterrole.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/clusterrole.yaml
new file mode 100644
index 00000000..594a2019
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/clusterrole.yaml
@@ -0,0 +1,71 @@
+{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-operator
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+rules:
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - '*'
+- apiGroups:
+ - {{ .Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com" }}
+ resources:
+ - alertmanagers
+ - prometheuses
+ - prometheuses/finalizers
+ - alertmanagers/finalizers
+ - servicemonitors
+ - prometheusrules
+ verbs:
+ - '*'
+- apiGroups:
+ - apps
+ resources:
+ - statefulsets
+ verbs:
+ - '*'
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ verbs:
+ - '*'
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - list
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - services
+ - endpoints
+ verbs:
+ - get
+ - create
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/clusterrolebinding.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/clusterrolebinding.yaml
new file mode 100644
index 00000000..13f0ca07
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/clusterrolebinding.yaml
@@ -0,0 +1,17 @@
+{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-operator
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "prometheus-operator.fullname" . }}-operator
+subjects:
+- kind: ServiceAccount
+ name: {{ template "prometheus-operator.operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end }}
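Both the ClusterRole and this ClusterRoleBinding are gated on .Values.prometheusOperator.enabled and .Values.global.rbac.create, so they can be rendered locally to confirm the conditionals behave as expected; a sketch assuming Helm 3's helm template syntax and an illustrative release name:

    helm template example vnfs/DAaaS/deploy/operator/charts/prometheus-operator \
        --set prometheusOperator.enabled=true \
        --set global.rbac.create=true \
        -s templates/prometheus-operator/clusterrole.yaml \
        -s templates/prometheus-operator/clusterrolebinding.yaml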
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-alertmanager.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-alertmanager.yaml
new file mode 100644
index 00000000..1834d02f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-alertmanager.yaml
@@ -0,0 +1,2477 @@
+{{- if and .Release.IsInstall .Values.prometheusOperator.enabled .Values.prometheusOperator.createCustomResource -}}
+# Source https://github.com/coreos/prometheus-operator/blob/master/contrib/kube-prometheus/manifests/0prometheus-operator-0alertmanagerCustomResourceDefinition.yaml
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: {{ printf "alertmanagers.%s" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+ annotations:
+ "helm.sh/hook": crd-install
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ group: {{ .Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com" }}
+ names:
+ kind: Alertmanager
+ plural: alertmanagers
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: 'AlertmanagerSpec is a specification of the desired behavior
+ of the Alertmanager cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status'
+ properties:
+ additionalPeers:
+ description: AdditionalPeers allows injecting a set of additional Alertmanagers
+ to peer with to form a highly available cluster.
+ items:
+ type: string
+ type: array
+ affinity:
+ description: Affinity is a group of affinity scheduling rules.
+ properties:
+ nodeAffinity:
+ description: Node affinity is a group of node affinity scheduling
+ rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the affinity expressions specified by this field,
+ but it may choose a node that violates one or more of the
+ expressions. The node that is most preferred is the one with
+ the greatest sum of weights, i.e. for each node that meets
+ all of the scheduling requirements (resource request, requiredDuringScheduling
+ affinity expressions, etc.), compute a sum by iterating through
+ the elements of this field and adding "weight" to the sum
+ if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all
+ objects with implicit weight 0 (i.e. it's a no-op). A null
+ preferred scheduling term matches no objects (i.e. is also
+ a no-op).
+ properties:
+ preference:
+ description: A null or empty node selector term matches
+ no objects. The requirements of them are ANDed. The
+ TopologySelectorTerm type implements a subset of the
+ NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - preference
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: A node selector represents the union of the results
+ of one or more label queries over a set of nodes; that is,
+ it represents the OR of the selectors represented by the node
+ selector terms.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The
+ terms are ORed.
+ items:
+ description: A null or empty node selector term matches
+ no objects. The requirements of them are ANDed. The
+ TopologySelectorTerm type implements a subset of the
+ NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ type: array
+ required:
+ - nodeSelectorTerms
+ podAffinity:
+ description: Pod affinity is a group of inter pod affinity scheduling
+ rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the affinity expressions specified by this field,
+ but it may choose a node that violates one or more of the
+ expressions. The node that is most preferred is the one with
+ the greatest sum of weights, i.e. for each node that meets
+ all of the scheduling requirements (resource request, requiredDuringScheduling
+ affinity expressions, etc.), compute a sum by iterating through
+ the elements of this field and adding "weight" to the sum
+ if the node has pods which matches the corresponding podAffinityTerm;
+ the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+ with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this
+ field are not met at scheduling time, the pod will not be
+ scheduled onto the node. If the affinity requirements specified
+ by this field cease to be met at some point during pod execution
+ (e.g. due to a pod label update), the system may or may not
+ try to eventually evict the pod from its node. When there
+ are multiple elements, the lists of nodes corresponding to
+ each podAffinityTerm are intersected, i.e. all terms must
+ be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s)) that
+ this pod should be co-located (affinity) or not co-located
+ (anti-affinity) with, where co-located is defined as running
+ on a node whose value of the label with key <topologyKey>
+ matches that of any node on which a pod of the set of pods
+ is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over a
+ set of resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects.
+ A null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator is
+ Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field
+ is "key", the operator is "In", and the values array
+ contains only "value". The requirements are ANDed.
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the
+ labelSelector applies to (matches against); null or
+ empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where
+ co-located is defined as running on a node whose value
+ of the label with key topologyKey matches that of any
+ node on which any of the selected pods is running. Empty
+ topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: array
+ podAntiAffinity:
+ description: Pod anti affinity is a group of inter pod anti affinity
+ scheduling rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the anti-affinity expressions specified by this
+ field, but it may choose a node that violates one or more
+ of the expressions. The node that is most preferred is the
+ one with the greatest sum of weights, i.e. for each node that
+ meets all of the scheduling requirements (resource request,
+ requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field
+ and adding "weight" to the sum if the node has pods which
+ matches the corresponding podAffinityTerm; the node(s) with
+ the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+ with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by
+ this field are not met at scheduling time, the pod will not
+ be scheduled onto the node. If the anti-affinity requirements
+ specified by this field cease to be met at some point during
+ pod execution (e.g. due to a pod label update), the system
+ may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding
+ to each podAffinityTerm are intersected, i.e. all terms must
+ be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s)) that
+ this pod should be co-located (affinity) or not co-located
+ (anti-affinity) with, where co-located is defined as running
+ on a node whose value of the label with key <topologyKey>
+ matches that of any node on which a pod of the set of pods
+ is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over a
+ set of resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects.
+ A null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator is
+ Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field
+ is "key", the operator is "In", and the values array
+ contains only "value". The requirements are ANDed.
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the
+ labelSelector applies to (matches against); null or
+ empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where
+ co-located is defined as running on a node whose value
+ of the label with key topologyKey matches that of any
+ node on which any of the selected pods is running. Empty
+ topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: array
+ baseImage:
+ description: Base image that is used to deploy pods, without tag.
+ type: string
+ configMaps:
+ description: ConfigMaps is a list of ConfigMaps in the same namespace
+ as the Alertmanager object, which shall be mounted into the Alertmanager
+ Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/<configmap-name>.
+ items:
+ type: string
+ type: array
+ containers:
+ description: Containers allows injecting additional containers. This
+ is meant to allow adding an authentication proxy to an Alertmanager
+ pod.
+ items:
+ description: A single application container that you want to run within
+ a pod.
+ properties:
+ args:
+ description: 'Arguments to the entrypoint. The docker image''s
+ CMD is used if this is not provided. Variable references $(VAR_NAME)
+ are expanded using the container''s environment. If a variable
+ cannot be resolved, the reference in the input string will be
+ unchanged. The $(VAR_NAME) syntax can be escaped with a double
+ $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
+ regardless of whether the variable exists or not. Cannot be
+ updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
+ items:
+ type: string
+ type: array
+ command:
+ description: 'Entrypoint array. Not executed within a shell. The
+ docker image''s ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container''s
+ environment. If a variable cannot be resolved, the reference
+ in the input string will be unchanged. The $(VAR_NAME) syntax
+ can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references
+ will never be expanded, regardless of whether the variable exists
+ or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
+ items:
+ type: string
+ type: array
+ env:
+ description: List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previous defined environment variables in the
+ container and any service environment variables. If a
+ variable cannot be resolved, the reference in the input
+ string will be unchanged. The $(VAR_NAME) syntax can be
+ escaped with a double $$, ie: $$(VAR_NAME). Escaped references
+ will never be expanded, regardless of whether the variable
+ exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: EnvVarSource represents a source for the value
+ of an EnvVar.
+ properties:
+ configMapKeyRef:
+ description: Selects a key from a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ fieldRef:
+ description: ObjectFieldSelector selects an APIVersioned
+ field of an object.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ resourceFieldRef:
+ description: ResourceFieldSelector represents container
+ resources (cpu, memory) and their output format
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor: {}
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ secretKeyRef:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ required:
+ - name
+ type: array
+ envFrom:
+ description: List of sources to populate environment variables
+ in the container. The keys defined within a source must be a
+ C_IDENTIFIER. All invalid keys will be reported as an event
+ when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take
+ precedence. Values defined by an Env with a duplicate key will
+ take precedence. Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of a set of
+ ConfigMaps
+ properties:
+ configMapRef:
+ description: |-
+ ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.
+
+ The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap must be defined
+ type: boolean
+ prefix:
+ description: An optional identifier to prepend to each key
+ in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: |-
+ SecretEnvSource selects a Secret to populate the environment variables with.
+
+ The contents of the target Secret's Data field will represent the key-value pairs as environment variables.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret must be defined
+ type: boolean
+ type: array
+ image:
+ description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management
+ to default or override container images in workload controllers
+ like Deployments and StatefulSets.'
+ type: string
+ imagePullPolicy:
+ description: 'Image pull policy. One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent
+ otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images'
+ type: string
+ lifecycle:
+ description: Lifecycle describes actions that the management system
+ should take in response to container lifecycle events. For the
+ PostStart and PreStop lifecycle handlers, management of the
+ container blocks until the action is complete, unless the container
+ process fails, in which case the handler is aborted.
+ properties:
+ postStart:
+ description: Handler defines a specific action that should
+ be taken
+ properties:
+ exec:
+ description: ExecAction describes a "run in container"
+ action.
+ properties:
+ command:
+ description: Command is the command line to execute
+ inside the container, the working directory for
+ the command is root ('/') in the container's filesystem.
+ The command is simply exec'd, it is not run inside
+ a shell, so traditional shell instructions ('|',
+ etc) won't work. To use a shell, you need to explicitly
+ call out to that shell. Exit status of 0 is treated
+ as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ httpGet:
+ description: HTTPGetAction describes an action based on
+ HTTP Get requests.
+ properties:
+ host:
+ description: Host name to connect to, defaults to
+ the pod IP. You probably want to set "Host" in httpHeaders
+ instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header
+ to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ scheme:
+ description: Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ tcpSocket:
+ description: TCPSocketAction describes an action based
+ on opening a socket
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults
+ to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ required:
+ - port
+ preStop:
+ description: Handler defines a specific action that should
+ be taken
+ properties:
+ exec:
+ description: ExecAction describes a "run in container"
+ action.
+ properties:
+ command:
+ description: Command is the command line to execute
+ inside the container, the working directory for
+ the command is root ('/') in the container's filesystem.
+ The command is simply exec'd, it is not run inside
+ a shell, so traditional shell instructions ('|',
+ etc) won't work. To use a shell, you need to explicitly
+ call out to that shell. Exit status of 0 is treated
+ as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ httpGet:
+ description: HTTPGetAction describes an action based on
+ HTTP Get requests.
+ properties:
+ host:
+ description: Host name to connect to, defaults to
+ the pod IP. You probably want to set "Host" in httpHeaders
+ instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header
+ to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ scheme:
+ description: Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ tcpSocket:
+ description: TCPSocketAction describes an action based
+ on opening a socket
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults
+ to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ required:
+ - port
+ livenessProbe:
+ description: Probe describes a health check to be performed against
+ a container to determine whether it is alive or ready to receive
+ traffic.
+ properties:
+ exec:
+ description: ExecAction describes a "run in container" action.
+ properties:
+ command:
+ description: Command is the command line to execute inside
+ the container, the working directory for the command is
+ root ('/') in the container's filesystem. The command
+ is simply exec'd, it is not run inside a shell, so traditional
+ shell instructions ('|', etc) won't work. To use a shell,
+ you need to explicitly call out to that shell. Exit
+ status of 0 is treated as live/healthy and non-zero
+ is unhealthy.
+ items:
+ type: string
+ type: array
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to
+ be considered failed after having succeeded. Defaults to
+ 3. Minimum value is 1.
+ format: int32
+ type: integer
+ httpGet:
+ description: HTTPGetAction describes an action based on HTTP
+ Get requests.
+ properties:
+ host:
+ description: Host name to connect to, defaults to the
+ pod IP. You probably want to set "Host" in httpHeaders
+ instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP
+ allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to
+ be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ scheme:
+ description: Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ initialDelaySeconds:
+ description: 'Number of seconds after the container has started
+ before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the probe to
+ be considered successful after having failed. Defaults to
+ 1. Must be 1 for liveness. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocketAction describes an action based on
+ opening a socket
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults
+ to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ required:
+ - port
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe times
+ out. Defaults to 1 second. Minimum value is 1. More info:
+ https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ name:
+ description: Name of the container specified as a DNS_LABEL. Each
+ container in a pod must have a unique name (DNS_LABEL). Cannot
+ be updated.
+ type: string
+ ports:
+ description: List of ports to expose from the container. Exposing
+ a port here gives the system additional information about the
+ network connections a container uses, but is primarily informational.
+ Not specifying a port here DOES NOT prevent that port from being
+ exposed. Any port which is listening on the default "0.0.0.0"
+ address inside a container will be accessible from the network.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port in a single
+ container.
+ properties:
+ containerPort:
+ description: Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external port to.
+ type: string
+ hostPort:
+ description: Number of port to expose on the host. If specified,
+ this must be a valid port number, 0 < x < 65536. If HostNetwork
+ is specified, this must match ContainerPort. Most containers
+ do not need this.
+ format: int32
+ type: integer
+ name:
+ description: If specified, this must be an IANA_SVC_NAME
+ and unique within the pod. Each named port in a pod must
+ have a unique name. Name for the port that can be referred
+ to by services.
+ type: string
+ protocol:
+ description: Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: array
+ readinessProbe:
+ description: Probe describes a health check to be performed against
+ a container to determine whether it is alive or ready to receive
+ traffic.
+ properties:
+ exec:
+ description: ExecAction describes a "run in container" action.
+ properties:
+ command:
+ description: Command is the command line to execute inside
+ the container, the working directory for the command is
+ root ('/') in the container's filesystem. The command
+ is simply exec'd, it is not run inside a shell, so traditional
+ shell instructions ('|', etc) won't work. To use a shell,
+ you need to explicitly call out to that shell. Exit
+ status of 0 is treated as live/healthy and non-zero
+ is unhealthy.
+ items:
+ type: string
+ type: array
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to
+ be considered failed after having succeeded. Defaults to
+ 3. Minimum value is 1.
+ format: int32
+ type: integer
+ httpGet:
+ description: HTTPGetAction describes an action based on HTTP
+ Get requests.
+ properties:
+ host:
+ description: Host name to connect to, defaults to the
+ pod IP. You probably want to set "Host" in httpHeaders
+ instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP
+ allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to
+ be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ scheme:
+ description: Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ initialDelaySeconds:
+ description: 'Number of seconds after the container has started
+ before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the probe to
+ be considered successful after having failed. Defaults to
+ 1. Must be 1 for liveness. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocketAction describes an action based on
+ opening a socket
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults
+ to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ required:
+ - port
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe times
+ out. Defaults to 1 second. Minimum value is 1. More info:
+ https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ securityContext:
+ description: SecurityContext holds security configuration that
+ will be applied to a container. Some fields are present in both
+ SecurityContext and PodSecurityContext. When both are set,
+ the values in SecurityContext take precedence.
+ properties:
+ allowPrivilegeEscalation:
+ description: 'AllowPrivilegeEscalation controls whether a
+ process can gain more privileges than its parent process.
+ This bool directly controls if the no_new_privs flag will
+ be set on the container process. AllowPrivilegeEscalation
+ is true always when the container is: 1) run as Privileged
+ 2) has CAP_SYS_ADMIN'
+ type: boolean
+ capabilities:
+ description: Adds and removes POSIX capabilities from running
+ containers.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ type: string
+ type: array
+ drop:
+ description: Removed capabilities
+ items:
+ type: string
+ type: array
+ privileged:
+ description: Run container in privileged mode. Processes in
+ privileged containers are essentially equivalent to root
+ on the host. Defaults to false.
+ type: boolean
+ procMount:
+ description: procMount denotes the type of proc mount to use
+ for the containers. The default is DefaultProcMount which
+ uses the container runtime defaults for readonly paths and
+ masked paths. This requires the ProcMountType feature flag
+ to be enabled.
+ type: string
+ readOnlyRootFilesystem:
+ description: Whether this container has a read-only root filesystem.
+ Default is false.
+ type: boolean
+ runAsGroup:
+ description: The GID to run the entrypoint of the container
+ process. Uses runtime default if unset. May also be set
+ in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext
+ takes precedence.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: Indicates that the container must run as a non-root
+ user. If true, the Kubelet will validate the image at runtime
+ to ensure that it does not run as UID 0 (root) and fail
+ to start the container if it does. If unset or false, no
+ such validation will be performed. May also be set in PodSecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the
+ value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: The UID to run the entrypoint of the container
+ process. Defaults to user specified in image metadata if
+ unspecified. May also be set in PodSecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the
+ value specified in SecurityContext takes precedence.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: SELinuxOptions are the labels to be applied to
+ the container
+ properties:
+ level:
+ description: Level is SELinux level label that applies
+ to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies
+ to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies
+ to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies
+ to the container.
+ type: string
+ stdin:
+ description: Whether this container should allocate a buffer for
+ stdin in the container runtime. If this is not set, reads from
+ stdin in the container will always result in EOF. Default is
+ false.
+ type: boolean
+ stdinOnce:
+ description: Whether the container runtime should close the stdin
+ channel after it has been opened by a single attach. When stdin
+ is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container
+ start, is empty until the first client attaches to stdin, and
+ then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container
+ is restarted. If this flag is false, a container process that
+ reads from stdin will never receive an EOF. Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: 'Optional: Path at which the file to which the container''s
+ termination message will be written is mounted into the container''s
+ filesystem. Message written is intended to be brief final status,
+ such as an assertion failure message. Will be truncated by the
+ node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb. Defaults to /dev/termination-log.
+ Cannot be updated.'
+ type: string
+ terminationMessagePolicy:
+ description: Indicate how the termination message should be populated.
+ File will use the contents of terminationMessagePath to populate
+ the container status message on both success and failure. FallbackToLogsOnError
+ will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever
+ is smaller. Defaults to File. Cannot be updated.
+ type: string
+ tty:
+ description: Whether this container should allocate a TTY for
+ itself, also requires 'stdin' to be true. Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices to be
+ used by the container. This is an alpha feature and may change
+ in the future.
+ items:
+ description: volumeDevice describes a mapping of a raw block
+ device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of the container
+ that the device will be mapped to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - name
+ - devicePath
+ type: array
+ volumeMounts:
+ description: Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a Volume within
+ a container.
+ properties:
+ mountPath:
+ description: Path within the container at which the volume
+ should be mounted. Must not contain ':'.
+ type: string
+ mountPropagation:
+ description: mountPropagation determines how mounts are
+ propagated from the host to container and the other way
+ around. When not set, MountPropagationNone is used. This
+ field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: Mounted read-only if true, read-write otherwise
+ (false or unspecified). Defaults to false.
+ type: boolean
+ subPath:
+ description: Path within the volume from which the container's
+ volume should be mounted. Defaults to "" (volume's root).
+ type: string
+ required:
+ - name
+ - mountPath
+ type: array
+ workingDir:
+ description: Container's working directory. If not specified,
+ the container runtime's default will be used, which might be
+ configured in the container image. Cannot be updated.
+ type: string
+ required:
+ - name
+ type: array
+ externalUrl:
+ description: The external URL the Alertmanager instances will be available
+ under. This is necessary to generate correct URLs. This is necessary
+ if Alertmanager is not served from the root of a DNS name.
+ type: string
+ imagePullSecrets:
+ description: An optional list of references to secrets in the same namespace
+ to use for pulling prometheus and alertmanager images from registries
+ see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: LocalObjectReference contains enough information to let
+ you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ type: array
+ listenLocal:
+ description: ListenLocal makes the Alertmanager server listen on loopback,
+ so that it does not bind against the Pod IP. Note this is only for
+ the Alertmanager UI, not the gossip communication.
+ type: boolean
+ logLevel:
+ description: Log level for Alertmanager to be configured with.
+ type: string
+ nodeSelector:
+ description: Define which Nodes the Pods are scheduled on.
+ type: object
+ paused:
+ description: If set to true, all actions on the underlying managed objects
+ are not going to be performed, except for delete actions.
+ type: boolean
+ podMetadata:
+ description: ObjectMeta is metadata that all persisted resources must
+ have, which includes all objects users must create.
+ properties:
+ annotations:
+ description: 'Annotations is an unstructured key value map stored
+ with a resource that may be set by external tools to store and
+ retrieve arbitrary metadata. They are not queryable and should
+ be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
+ type: object
+ clusterName:
+ description: The name of the cluster which the object belongs to.
+ This is used to distinguish resources with same name and namespace
+ in different clusters. This field is not set anywhere right now
+ and apiserver is going to ignore it if set in create or update
+ request.
+ type: string
+ creationTimestamp:
+ description: Time is a wrapper around time.Time which supports correct
+ marshaling to YAML and JSON. Wrappers are provided for many of
+ the factory methods that the time package offers.
+ format: date-time
+ type: string
+ deletionGracePeriodSeconds:
+ description: Number of seconds allowed for this object to gracefully
+ terminate before it will be removed from the system. Only set
+ when deletionTimestamp is also set. May only be shortened. Read-only.
+ format: int64
+ type: integer
+ deletionTimestamp:
+ description: Time is a wrapper around time.Time which supports correct
+ marshaling to YAML and JSON. Wrappers are provided for many of
+ the factory methods that the time package offers.
+ format: date-time
+ type: string
+ finalizers:
+ description: Must be empty before the object is deleted from the
+ registry. Each entry is an identifier for the responsible component
+ that will remove the entry from the list. If the deletionTimestamp
+ of the object is non-nil, entries in this list can only be removed.
+ items:
+ type: string
+ type: array
+ generateName:
+ description: |-
+ GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.
+
+ If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).
+
+ Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
+ type: string
+ generation:
+ description: A sequence number representing a specific generation
+ of the desired state. Populated by the system. Read-only.
+ format: int64
+ type: integer
+ initializers:
+ description: Initializers tracks the progress of initialization.
+ properties:
+ pending:
+ description: Pending is a list of initializers that must execute
+ in order before this object is visible. When the last pending
+ initializer is removed, and no failing result is set, the
+ initializers struct will be set to nil and the object is considered
+ as initialized and visible to all clients.
+ items:
+ description: Initializer is information about an initializer
+ that has not yet completed.
+ properties:
+ name:
+ description: name of the process that is responsible for
+ initializing this object.
+ type: string
+ required:
+ - name
+ type: array
+ result:
+ description: Status is a return value for calls that don't return
+ other objects.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of
+ this representation of an object. Servers should convert
+ recognized schemas to the latest internal value, and may
+ reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ code:
+ description: Suggested HTTP return code for this status,
+ 0 if not set.
+ format: int32
+ type: integer
+ details:
+ description: StatusDetails is a set of additional properties
+ that MAY be set by the server to provide additional information
+ about a response. The Reason field of a Status object
+ defines what attributes will be set. Clients must ignore
+ fields that do not match the defined type of each attribute,
+ and should assume that any attribute may be empty, invalid,
+ or under defined.
+ properties:
+ causes:
+ description: The Causes array includes more details
+ associated with the StatusReason failure. Not all
+ StatusReasons may provide detailed causes.
+ items:
+ description: StatusCause provides more information
+ about an api.Status failure, including cases when
+ multiple errors are encountered.
+ properties:
+ field:
+ description: |-
+ The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.
+
+ Examples:
+ "name" - the field "name" on the current resource
+ "items[0].name" - the field "name" on the first array entry in "items"
+ type: string
+ message:
+ description: A human-readable description of the
+ cause of the error. This field may be presented
+ as-is to a reader.
+ type: string
+ reason:
+ description: A machine-readable description of
+ the cause of the error. If this value is empty
+ there is no information available.
+ type: string
+ type: array
+ group:
+ description: The group attribute of the resource associated
+ with the status StatusReason.
+ type: string
+ kind:
+ description: 'The kind attribute of the resource associated
+ with the status StatusReason. On some operations may
+ differ from the requested resource Kind. More info:
+ https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: The name attribute of the resource associated
+ with the status StatusReason (when there is a single
+ name which can be described).
+ type: string
+ retryAfterSeconds:
+ description: If specified, the time in seconds before
+ the operation should be retried. Some errors may indicate
+ the client must take an alternate action - for those
+ errors this field may indicate how long to wait before
+ taking the alternate action.
+ format: int32
+ type: integer
+ uid:
+ description: 'UID of the resource. (when there is a
+ single resource which can be described). More info:
+ http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST
+ resource this object represents. Servers may infer this
+ from the endpoint the client submits requests to. Cannot
+ be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ message:
+ description: A human-readable description of the status
+ of this operation.
+ type: string
+ metadata:
+ description: ListMeta describes metadata that synthetic
+ resources must have, including lists and various status
+ objects. A resource may have only one of {ObjectMeta,
+ ListMeta}.
+ properties:
+ continue:
+ description: continue may be set if the user set a limit
+ on the number of items returned, and indicates that
+ the server has more data available. The value is opaque
+ and may be used to issue another request to the endpoint
+ that served this list to retrieve the next set of
+ available objects. Continuing a consistent list may
+ not be possible if the server configuration has changed
+ or more than a few minutes have passed. The resourceVersion
+ field returned when using this continue value will
+ be identical to the value in the first response, unless
+ you have received this token from an error message.
+ type: string
+ resourceVersion:
+ description: 'String that identifies the server''s internal
+ version of this object that can be used by clients
+ to determine when objects have changed. Value must
+ be treated as opaque by clients and passed unmodified
+ back to the server. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ selfLink:
+ description: selfLink is a URL representing this object.
+ Populated by the system. Read-only.
+ type: string
+ reason:
+ description: A machine-readable description of why this
+ operation is in the "Failure" status. If this value is
+ empty there is no information available. A Reason clarifies
+ an HTTP status code but does not override it.
+ type: string
+ status:
+ description: 'Status of the operation. One of: "Success"
+ or "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
+ type: string
+ required:
+ - pending
+ labels:
+ description: 'Map of string keys and values that can be used to
+ organize and categorize (scope and select) objects. May match
+ selectors of replication controllers and services. More info:
+ http://kubernetes.io/docs/user-guide/labels'
+ type: object
+ name:
+ description: 'Name must be unique within a namespace. Is required
+ when creating resources, although some resources may allow a client
+ to request the generation of an appropriate name automatically.
+ Name is primarily intended for creation idempotence and configuration
+ definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ namespace:
+ description: |-
+ Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.
+
+ Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces
+ type: string
+ ownerReferences:
+ description: List of objects depended by this object. If ALL objects
+ in the list have been deleted, this object will be garbage collected.
+ If this object is managed by a controller, then an entry in this
+ list will point to this controller, with the controller field
+ set to true. There cannot be more than one managing controller.
+ items:
+ description: OwnerReference contains enough information to let
+ you identify an owning object. Currently, an owning object must
+ be in the same namespace, so there is no namespace field.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ blockOwnerDeletion:
+ description: If true, AND if the owner has the "foregroundDeletion"
+ finalizer, then the owner cannot be deleted from the key-value
+ store until this reference is removed. Defaults to false.
+ To set this field, a user needs "delete" permission of the
+ owner, otherwise 422 (Unprocessable Entity) will be returned.
+ type: boolean
+ controller:
+ description: If true, this reference points to the managing
+ controller.
+ type: boolean
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ required:
+ - apiVersion
+ - kind
+ - name
+ - uid
+ type: array
+ resourceVersion:
+ description: |-
+ An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and pass them unmodified back to the server. They may only be valid for a particular resource or set of resources.
+
+ Populated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ selfLink:
+ description: SelfLink is a URL representing this object. Populated
+ by the system. Read-only.
+ type: string
+ uid:
+ description: |-
+ UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.
+
+ Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+ type: string
+ priorityClassName:
+ description: Priority class assigned to the Pods
+ type: string
+ replicas:
+ description: Size is the expected size of the alertmanager cluster.
+ The controller will eventually make the size of the running cluster
+ equal to the expected size.
+ format: int32
+ type: integer
+ resources:
+ description: ResourceRequirements describes the compute resource requirements.
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of compute resources
+ allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ description: 'Requests describes the minimum amount of compute resources
+ required. If Requests is omitted for a container, it defaults
+ to Limits if that is explicitly specified, otherwise to an implementation-defined
+ value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
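+ # Hedged illustration (comment only, not part of the schema): in an
+ # Alertmanager manifest the resources field described above might be
+ # filled in as follows; the quantities are assumed values, not defaults.
+ #   resources:
+ #     requests:
+ #       cpu: 100m
+ #       memory: 128Mi
+ #     limits:
+ #       cpu: 500m
+ #       memory: 256Mi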
+ retention:
+ description: Time duration Alertmanager shall retain data for. Default
+ is '120h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)`
+ (milliseconds, seconds, minutes, hours, days, weeks, years).
+ type: string
+ routePrefix:
+ description: The route prefix Alertmanager registers HTTP handlers for.
+ This is useful when using ExternalURL and a proxy is rewriting HTTP
+ routes of a request: the actual ExternalURL is still correct, but the
+ server serves requests under a different route prefix, for example
+ when used with `kubectl proxy`.
+ type: string
+ secrets:
+ description: Secrets is a list of Secrets in the same namespace as the
+ Alertmanager object, which shall be mounted into the Alertmanager
+ Pods. The Secrets are mounted into /etc/alertmanager/secrets/<secret-name>.
+ items:
+ type: string
+ type: array
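+ # Commented sketch of the secrets field documented above: each listed
+ # Secret (the name below is hypothetical) is mounted under the stated
+ # /etc/alertmanager/secrets/<secret-name> path.
+ #   secrets:
+ #   - alertmanager-tls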
+ securityContext:
+ description: PodSecurityContext holds pod-level security attributes
+ and common container settings. Some fields are also present in container.securityContext. Field
+ values of container.securityContext take precedence over field values
+ of PodSecurityContext.
+ properties:
+ fsGroup:
+ description: |-
+ A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
+
+ 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----
+
+ If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ format: int64
+ type: integer
+ runAsGroup:
+ description: The GID to run the entrypoint of the container process.
+ Uses runtime default if unset. May also be set in SecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the value
+ specified in SecurityContext takes precedence for that container.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: Indicates that the container must run as a non-root
+ user. If true, the Kubelet will validate the image at runtime
+ to ensure that it does not run as UID 0 (root) and fail to start
+ the container if it does. If unset or false, no such validation
+ will be performed. May also be set in SecurityContext. If set
+ in both SecurityContext and PodSecurityContext, the value specified
+ in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified. May
+ also be set in SecurityContext. If set in both SecurityContext
+ and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: SELinuxOptions are the labels to be applied to the
+ container
+ properties:
+ level:
+ description: Level is SELinux level label that applies to the
+ container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies to the
+ container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies to the
+ container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies to the
+ container.
+ type: string
+ supplementalGroups:
+ description: A list of groups applied to the first process run in
+ each container, in addition to the container's primary GID. If
+ unspecified, no groups will be added to any container.
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ description: Sysctls hold a list of namespaced sysctls used for
+ the pod. Pods with unsupported sysctls (by the container runtime)
+ might fail to launch.
+ items:
+ description: Sysctl defines a kernel parameter to be set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: array
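+ # Illustrative sketch only: the pod-level securityContext fields described
+ # above could be combined roughly like this in an Alertmanager manifest;
+ # the numeric IDs are assumptions.
+ #   securityContext:
+ #     runAsUser: 1000
+ #     runAsNonRoot: true
+ #     fsGroup: 2000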
+ serviceAccountName:
+ description: ServiceAccountName is the name of the ServiceAccount to
+ use to run the Alertmanager Pods.
+ type: string
+ sha:
+ description: SHA of Alertmanager container image to be deployed. Defaults
+ to the value of `version`. Similar to a tag, but the SHA explicitly
+ deploys an immutable container image. Version and Tag are ignored
+ if SHA is set.
+ type: string
+ storage:
+ description: StorageSpec defines the configured storage for a group
+ of Prometheus servers. If neither `emptyDir` nor `volumeClaimTemplate`
+ is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir)
+ will be used.
+ properties:
+ class:
+ description: 'Name of the StorageClass to use when requesting storage
+ provisioning. More info: https://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses
+ (DEPRECATED - instead use `volumeClaimTemplate.spec.storageClassName`)'
+ type: string
+ emptyDir:
+ description: Represents an empty directory for a pod. Empty directory
+ volumes support ownership management and SELinux relabeling.
+ properties:
+ medium:
+ description: 'What type of storage medium should back this directory.
+ The default is "" which means to use the node''s default medium.
+ Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
+ type: string
+ sizeLimit: {}
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ selector:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ volumeClaimTemplate:
+ description: PersistentVolumeClaim is a user's request for and claim
+ to a persistent volume
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this
+ representation of an object. Servers should convert recognized
+ schemas to the latest internal value, and may reject unrecognized
+ values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource
+ this object represents. Servers may infer this from the endpoint
+ the client submits requests to. Cannot be updated. In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ description: ObjectMeta is metadata that all persisted resources
+ must have, which includes all objects users must create.
+ properties:
+ annotations:
+ description: 'Annotations is an unstructured key value map
+ stored with a resource that may be set by external tools
+ to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations'
+ type: object
+ clusterName:
+ description: The name of the cluster which the object belongs
+ to. This is used to distinguish resources with the same name
+ and namespace in different clusters. This field is not set
+ anywhere right now, and the apiserver is going to ignore it
+ if set in a create or update request.
+ type: string
+ creationTimestamp:
+ description: Time is a wrapper around time.Time which supports
+ correct marshaling to YAML and JSON. Wrappers are provided
+ for many of the factory methods that the time package
+ offers.
+ format: date-time
+ type: string
+ deletionGracePeriodSeconds:
+ description: Number of seconds allowed for this object to
+ gracefully terminate before it will be removed from the
+ system. Only set when deletionTimestamp is also set. May
+ only be shortened. Read-only.
+ format: int64
+ type: integer
+ deletionTimestamp:
+ description: Time is a wrapper around time.Time which supports
+ correct marshaling to YAML and JSON. Wrappers are provided
+ for many of the factory methods that the time package
+ offers.
+ format: date-time
+ type: string
+ finalizers:
+ description: Must be empty before the object is deleted
+ from the registry. Each entry is an identifier for the
+ responsible component that will remove the entry from
+ the list. If the deletionTimestamp of the object is non-nil,
+ entries in this list can only be removed.
+ items:
+ type: string
+ type: array
+ generateName:
+ description: |-
+ GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.
+
+ If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).
+
+ Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
+ type: string
+ generation:
+ description: A sequence number representing a specific generation
+ of the desired state. Populated by the system. Read-only.
+ format: int64
+ type: integer
+ initializers:
+ description: Initializers tracks the progress of initialization.
+ properties:
+ pending:
+ description: Pending is a list of initializers that
+ must execute in order before this object is visible.
+ When the last pending initializer is removed, and
+ no failing result is set, the initializers struct
+ will be set to nil and the object is considered as
+ initialized and visible to all clients.
+ items:
+ description: Initializer is information about an initializer
+ that has not yet completed.
+ properties:
+ name:
+ description: name of the process that is responsible
+ for initializing this object.
+ type: string
+ required:
+ - name
+ type: array
+ result:
+ description: Status is a return value for calls that
+ don't return other objects.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema
+ of this representation of an object. Servers should
+ convert recognized schemas to the latest internal
+ value, and may reject unrecognized values. More
+ info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ code:
+ description: Suggested HTTP return code for this
+ status, 0 if not set.
+ format: int32
+ type: integer
+ details:
+ description: StatusDetails is a set of additional
+ properties that MAY be set by the server to provide
+ additional information about a response. The Reason
+ field of a Status object defines what attributes
+ will be set. Clients must ignore fields that do
+ not match the defined type of each attribute,
+ and should assume that any attribute may be empty,
+ invalid, or under-defined.
+ properties:
+ causes:
+ description: The Causes array includes more
+ details associated with the StatusReason failure.
+ Not all StatusReasons may provide detailed
+ causes.
+ items:
+ description: StatusCause provides more information
+ about an api.Status failure, including cases
+ when multiple errors are encountered.
+ properties:
+ field:
+ description: |-
+ The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.
+
+ Examples:
+ "name" - the field "name" on the current resource
+ "items[0].name" - the field "name" on the first array entry in "items"
+ type: string
+ message:
+ description: A human-readable description
+ of the cause of the error. This field
+ may be presented as-is to a reader.
+ type: string
+ reason:
+ description: A machine-readable description
+ of the cause of the error. If this value
+ is empty there is no information available.
+ type: string
+ type: array
+ group:
+ description: The group attribute of the resource
+ associated with the status StatusReason.
+ type: string
+ kind:
+ description: 'The kind attribute of the resource
+ associated with the status StatusReason. On
+ some operations may differ from the requested
+ resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: The name attribute of the resource
+ associated with the status StatusReason (when
+ there is a single name which can be described).
+ type: string
+ retryAfterSeconds:
+ description: If specified, the time in seconds
+ before the operation should be retried. Some
+ errors may indicate the client must take an
+ alternate action - for those errors this field
+ may indicate how long to wait before taking
+ the alternate action.
+ format: int32
+ type: integer
+ uid:
+ description: 'UID of the resource. (when there
+ is a single resource which can be described).
+ More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ kind:
+ description: 'Kind is a string value representing
+ the REST resource this object represents. Servers
+ may infer this from the endpoint the client submits
+ requests to. Cannot be updated. In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ message:
+ description: A human-readable description of the
+ status of this operation.
+ type: string
+ metadata:
+ description: ListMeta describes metadata that synthetic
+ resources must have, including lists and various
+ status objects. A resource may have only one of
+ {ObjectMeta, ListMeta}.
+ properties:
+ continue:
+ description: continue may be set if the user
+ set a limit on the number of items returned,
+ and indicates that the server has more data
+ available. The value is opaque and may be
+ used to issue another request to the endpoint
+ that served this list to retrieve the next
+ set of available objects. Continuing a consistent
+ list may not be possible if the server configuration
+ has changed or more than a few minutes have
+ passed. The resourceVersion field returned
+ when using this continue value will be identical
+ to the value in the first response, unless
+ you have received this token from an error
+ message.
+ type: string
+ resourceVersion:
+ description: 'String that identifies the server''s
+ internal version of this object that can be
+ used by clients to determine when objects
+ have changed. Value must be treated as opaque
+ by clients and passed unmodified back to the
+ server. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ selfLink:
+ description: selfLink is a URL representing
+ this object. Populated by the system. Read-only.
+ type: string
+ reason:
+ description: A machine-readable description of why
+ this operation is in the "Failure" status. If
+ this value is empty there is no information available.
+ A Reason clarifies an HTTP status code but does
+ not override it.
+ type: string
+ status:
+ description: 'Status of the operation. One of: "Success"
+ or "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
+ type: string
+ required:
+ - pending
+ labels:
+ description: 'Map of string keys and values that can be
+ used to organize and categorize (scope and select) objects.
+ May match selectors of replication controllers and services.
+ More info: http://kubernetes.io/docs/user-guide/labels'
+ type: object
+ name:
+ description: 'Name must be unique within a namespace. Is
+ required when creating resources, although some resources
+ may allow a client to request the generation of an appropriate
+ name automatically. Name is primarily intended for creation
+ idempotence and configuration definition. Cannot be updated.
+ More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ namespace:
+ description: |-
+ Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.
+
+ Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces
+ type: string
+ ownerReferences:
+ description: List of objects depended by this object. If
+ ALL objects in the list have been deleted, this object
+ will be garbage collected. If this object is managed by
+ a controller, then an entry in this list will point to
+ this controller, with the controller field set to true.
+ There cannot be more than one managing controller.
+ items:
+ description: OwnerReference contains enough information
+ to let you identify an owning object. Currently, an
+ owning object must be in the same namespace, so there
+ is no namespace field.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ blockOwnerDeletion:
+ description: If true, AND if the owner has the "foregroundDeletion"
+ finalizer, then the owner cannot be deleted from
+ the key-value store until this reference is removed.
+ Defaults to false. To set this field, a user needs
+ "delete" permission of the owner, otherwise 422
+ (Unprocessable Entity) will be returned.
+ type: boolean
+ controller:
+ description: If true, this reference points to the
+ managing controller.
+ type: boolean
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ required:
+ - apiVersion
+ - kind
+ - name
+ - uid
+ type: array
+ resourceVersion:
+ description: |-
+ An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and pass them unmodified back to the server. They may only be valid for a particular resource or set of resources.
+
+ Populated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ selfLink:
+ description: SelfLink is a URL representing this object.
+ Populated by the system. Read-only.
+ type: string
+ uid:
+ description: |-
+ UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.
+
+ Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+ type: string
+ spec:
+ description: PersistentVolumeClaimSpec describes the common
+ attributes of storage devices and allows a Source for provider-specific
+ attributes
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes
+ the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: TypedLocalObjectReference contains enough information
+ to let you locate the typed referenced object inside the
+ same namespace.
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource
+ being referenced. If APIGroup is not specified, the
+ specified Kind must be in the core API group. For
+ any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ resources:
+ description: ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of
+ compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ description: 'Requests describes the minimum amount
+ of compute resources required. If Requests is omitted
+ for a container, it defaults to Limits if that is
+ explicitly specified, otherwise to an implementation-defined
+ value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ selector:
+ description: A label selector is a label query over a set
+ of resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects.
+ A null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be empty.
+ This array is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field
+ is "key", the operator is "In", and the values array
+ contains only "value". The requirements are ANDed.
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required
+ by the claim. Value of Filesystem is implied when not
+ included in claim spec. This is an alpha feature and may
+ change in the future.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the
+ PersistentVolume backing this claim.
+ type: string
+ status:
+ description: PersistentVolumeClaimStatus is the current status
+ of a persistent volume claim.
+ properties:
+ accessModes:
+ description: 'AccessModes contains the actual access modes
+ the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ capacity:
+ description: Represents the actual resources of the underlying
+ volume.
+ type: object
+ conditions:
+ description: Current Condition of persistent volume claim.
+ If underlying persistent volume is being resized then
+ the Condition will be set to 'ResizeStarted'.
+ items:
+ description: PersistentVolumeClaimCondition contains details
+ about the state of a PVC
+ properties:
+ lastProbeTime:
+ description: Time is a wrapper around time.Time which
+ supports correct marshaling to YAML and JSON. Wrappers
+ are provided for many of the factory methods that
+ the time package offers.
+ format: date-time
+ type: string
+ lastTransitionTime:
+ description: Time is a wrapper around time.Time which
+ supports correct marshaling to YAML and JSON. Wrappers
+ are provided for many of the factory methods that
+ the time package offers.
+ format: date-time
+ type: string
+ message:
+ description: Human-readable message indicating details
+ about last transition.
+ type: string
+ reason:
+ description: Unique, this should be a short, machine
+ understandable string that gives the reason for
+ condition's last transition. If it reports "ResizeStarted"
+ that means the underlying persistent volume is being
+ resized.
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ - status
+ type: array
+ phase:
+ description: Phase represents the current phase of PersistentVolumeClaim.
+ type: string
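+ # Minimal sketch (comments only) of the storage field documented above,
+ # using volumeClaimTemplate as recommended over the deprecated `class`;
+ # the storageClassName and size are illustrative assumptions.
+ #   storage:
+ #     volumeClaimTemplate:
+ #       spec:
+ #         storageClassName: standard
+ #         resources:
+ #           requests:
+ #             storage: 10Gi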
+ tag:
+ description: Tag of Alertmanager container image to be deployed. Defaults
+ to the value of `version`. Version is ignored if Tag is set.
+ type: string
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: The pod this Toleration is attached to tolerates any
+ taint that matches the triple <key,value,effect> using the matching
+ operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty, operator
+ must be Exists; this combination means to match all values and
+ all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal. Exists
+ is equivalent to wildcard for value, so that a pod can tolerate
+ all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the
+ toleration (which must be of effect NoExecute, otherwise this
+ field is ignored) tolerates the taint. By default, it is not
+ set, which means tolerate the taint forever (do not evict).
+ Zero and negative values will be treated as 0 (evict immediately)
+ by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise
+ just a regular string.
+ type: string
+ type: array
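+ # A possible toleration matching the semantics described above (sketch in
+ # comments; the key and value are hypothetical):
+ #   tolerations:
+ #   - key: dedicated
+ #     operator: Equal
+ #     value: monitoring
+ #     effect: NoSchedule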
+ version:
+ description: Version the cluster should be on.
+ type: string
+ status:
+ description: 'AlertmanagerStatus is the most recent observed status of the
+ Alertmanager cluster. Read-only. Not included when requesting from the
+ apiserver, only from the Prometheus Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status'
+ properties:
+ availableReplicas:
+ description: Total number of available pods (ready for at least minReadySeconds)
+ targeted by this Alertmanager cluster.
+ format: int32
+ type: integer
+ paused:
+ description: Represents whether any actions on the underlying managed
+ objects are being performed. Only delete actions will be performed.
+ type: boolean
+ replicas:
+ description: Total number of non-terminated pods targeted by this Alertmanager
+ cluster (their labels match the selector).
+ format: int32
+ type: integer
+ unavailableReplicas:
+ description: Total number of unavailable pods targeted by this Alertmanager
+ cluster.
+ format: int32
+ type: integer
+ updatedReplicas:
+ description: Total number of non-terminated pods targeted by this Alertmanager
+ cluster that have the desired version spec.
+ format: int32
+ type: integer
+ required:
+ - paused
+ - replicas
+ - updatedReplicas
+ - availableReplicas
+ - unavailableReplicas
+ version: v1
+{{- end }}
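+# For orientation only, a hedged sketch of a custom resource conforming to
+# the Alertmanager CRD above, assuming the chart's default crdApiGroup of
+# monitoring.coreos.com; every value shown is an assumption.
+#   apiVersion: monitoring.coreos.com/v1
+#   kind: Alertmanager
+#   metadata:
+#     name: example
+#   spec:
+#     replicas: 3
+#     retention: 120h
+#     serviceAccountName: alertmanager-example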
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-prometheus.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-prometheus.yaml
new file mode 100644
index 00000000..0debca78
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-prometheus.yaml
@@ -0,0 +1,3178 @@
+{{- if and .Release.IsInstall .Values.prometheusOperator.enabled .Values.prometheusOperator.createCustomResource -}}
+# Source https://github.com/coreos/prometheus-operator/blob/master/contrib/kube-prometheus/manifests/0prometheus-operator-0prometheusCustomResourceDefinition.yaml
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: {{ printf "prometheuses.%s" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+ annotations:
+ "helm.sh/hook": crd-install
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ group: {{ .Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com" }}
+ names:
+ kind: Prometheus
+ plural: prometheuses
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: 'PrometheusSpec is a specification of the desired behavior
+ of the Prometheus cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status'
+ properties:
+ additionalAlertManagerConfigs:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must be a valid
+ secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - key
+ additionalAlertRelabelConfigs:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must be a valid
+ secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - key
+ additionalScrapeConfigs:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must be a valid
+ secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - key
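+ # Sketch of how the SecretKeySelector shape described above might be used
+ # for additionalScrapeConfigs (comments only; the Secret name and key are
+ # hypothetical):
+ #   additionalScrapeConfigs:
+ #     name: additional-scrape-configs
+ #     key: prometheus-additional.yaml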
+ affinity:
+ description: Affinity is a group of affinity scheduling rules.
+ properties:
+ nodeAffinity:
+ description: Node affinity is a group of node affinity scheduling
+ rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the affinity expressions specified by this field,
+ but it may choose a node that violates one or more of the
+ expressions. The node that is most preferred is the one with
+ the greatest sum of weights, i.e. for each node that meets
+ all of the scheduling requirements (resource request, requiredDuringScheduling
+ affinity expressions, etc.), compute a sum by iterating through
+ the elements of this field and adding "weight" to the sum
+ if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all
+ objects with implicit weight 0 (i.e. it's a no-op). A null
+ preferred scheduling term matches no objects (i.e. is also
+ a no-op).
+ properties:
+ preference:
+ description: A null or empty node selector term matches
+ no objects. Their requirements are ANDed. The
+ TopologySelectorTerm type implements a subset of the
+ NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - preference
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: A node selector represents the union of the results
+ of one or more label queries over a set of nodes; that is,
+ it represents the OR of the selectors represented by the node
+ selector terms.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The
+ terms are ORed.
+ items:
+ description: A null or empty node selector term matches
+ no objects. Their requirements are ANDed. The
+ TopologySelectorTerm type implements a subset of the
+ NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ type: array
+ required:
+ - nodeSelectorTerms
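+ # Commented sketch of a required node affinity per the schema above, using
+ # the standard kubernetes.io/os node label; purely illustrative.
+ #   affinity:
+ #     nodeAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #         nodeSelectorTerms:
+ #         - matchExpressions:
+ #           - key: kubernetes.io/os
+ #             operator: In
+ #             values:
+ #             - linux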
+ podAffinity:
+ description: Pod affinity is a group of inter pod affinity scheduling
+ rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the affinity expressions specified by this field,
+ but it may choose a node that violates one or more of the
+ expressions. The node that is most preferred is the one with
+ the greatest sum of weights, i.e. for each node that meets
+ all of the scheduling requirements (resource request, requiredDuringScheduling
+ affinity expressions, etc.), compute a sum by iterating through
+ the elements of this field and adding "weight" to the sum
+ if the node has pods which matches the corresponding podAffinityTerm;
+ the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+ with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this
+ field are not met at scheduling time, the pod will not be
+ scheduled onto the node. If the affinity requirements specified
+ by this field cease to be met at some point during pod execution
+ (e.g. due to a pod label update), the system may or may not
+ try to eventually evict the pod from its node. When there
+ are multiple elements, the lists of nodes corresponding to
+ each podAffinityTerm are intersected, i.e. all terms must
+ be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s)) that
+ this pod should be co-located (affinity) or not co-located
+ (anti-affinity) with, where co-located is defined as running
+ on a node whose value of the label with key <topologyKey>
+ matches that of any node on which a pod of the set of pods
+ is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over a
+ set of resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects.
+ A null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator is
+ Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field
+ is "key", the operator is "In", and the values array
+ contains only "value". The requirements are ANDed.
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the
+ labelSelector applies to (matches against); null or
+ empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where
+ co-located is defined as running on a node whose value
+ of the label with key topologyKey matches that of any
+ node on which any of the selected pods is running. Empty
+ topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: array
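+ # Commented sketch of a required pod affinity term per the schema above,
+ # co-locating pods on the same node as pods carrying a hypothetical
+ # app=example label:
+ #   affinity:
+ #     podAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #       - topologyKey: kubernetes.io/hostname
+ #         labelSelector:
+ #           matchLabels:
+ #             app: example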
+ podAntiAffinity:
+ description: Pod anti affinity is a group of inter pod anti affinity
+ scheduling rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the anti-affinity expressions specified by this
+ field, but it may choose a node that violates one or more
+ of the expressions. The node that is most preferred is the
+ one with the greatest sum of weights, i.e. for each node that
+ meets all of the scheduling requirements (resource request,
+ requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field
+ and adding "weight" to the sum if the node has pods which
+ matches the corresponding podAffinityTerm; the node(s) with
+ the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+ with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by
+ this field are not met at scheduling time, the pod will not
+ be scheduled onto the node. If the anti-affinity requirements
+ specified by this field cease to be met at some point during
+ pod execution (e.g. due to a pod label update), the system
+ may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding
+ to each podAffinityTerm are intersected, i.e. all terms must
+ be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s)) that
+ this pod should be co-located (affinity) or not co-located
+ (anti-affinity) with, where co-located is defined as running
+ on a node whose value of the label with key <topologyKey>
+ matches that of any node on which a pod of the set of pods
+ is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over a
+ set of resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects.
+ A null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator is
+ Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field
+ is "key", the operator is "In", and the values array
+ contains only "value". The requirements are ANDed.
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the
+ labelSelector applies to (matches against); null or
+ empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where
+ co-located is defined as running on a node whose value
+ of the label with key topologyKey matches that of any
+ node on which any of the selected pods is running. Empty
+ topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: array
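+ # Illustrative usage of the anti-affinity schema above, with an assumed
+ # "app: prometheus" pod label and the standard hostname topology key:
+ #   spec:
+ #     affinity:
+ #       podAntiAffinity:
+ #         requiredDuringSchedulingIgnoredDuringExecution:
+ #         - labelSelector:
+ #             matchLabels:
+ #               app: prometheus
+ #           topologyKey: kubernetes.io/hostname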
+ alerting:
+ description: AlertingSpec defines parameters for alerting configuration
+ of Prometheus servers.
+ properties:
+ alertmanagers:
+ description: AlertmanagerEndpoints Prometheus should fire alerts
+ against.
+ items:
+ description: AlertmanagerEndpoints defines a selection of a single
+ Endpoints object containing alertmanager IPs to fire alerts
+ against.
+ properties:
+ bearerTokenFile:
+ description: BearerTokenFile to read from filesystem to use
+ when authenticating to Alertmanager.
+ type: string
+ name:
+ description: Name of Endpoints object in Namespace.
+ type: string
+ namespace:
+ description: Namespace of Endpoints object.
+ type: string
+ pathPrefix:
+ description: Prefix for the HTTP path alerts are pushed to.
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ scheme:
+ description: Scheme to use when firing alerts.
+ type: string
+ tlsConfig:
+ description: TLSConfig specifies TLS configuration parameters.
+ properties:
+ caFile:
+ description: The CA cert to use for the targets.
+ type: string
+ certFile:
+ description: The client cert file for the targets.
+ type: string
+ insecureSkipVerify:
+ description: Disable target certificate validation.
+ type: boolean
+ keyFile:
+ description: The client key file for the targets.
+ type: string
+ serverName:
+ description: Used to verify the hostname for the targets.
+ type: string
+ required:
+ - namespace
+ - name
+ - port
+ type: array
+ required:
+ - alertmanagers
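+ # Illustrative alerting stanza for a Prometheus custom resource; the
+ # Endpoints name, namespace and port name below are assumed values:
+ #   alerting:
+ #     alertmanagers:
+ #     - namespace: monitoring
+ #       name: alertmanager-main
+ #       port: web
+ #       scheme: http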
+ apiserverConfig:
+ description: 'APIServerConfig defines a host and auth methods to access
+ apiserver. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config'
+ properties:
+ basicAuth:
+ description: 'BasicAuth allow an endpoint to authenticate over basic
+ authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints'
+ properties:
+ password:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ username:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ bearerToken:
+ description: Bearer token for accessing apiserver.
+ type: string
+ bearerTokenFile:
+ description: File to read bearer token for accessing apiserver.
+ type: string
+ host:
+ description: Host of apiserver. A valid string consisting of a hostname
+ or IP followed by an optional port number
+ type: string
+ tlsConfig:
+ description: TLSConfig specifies TLS configuration parameters.
+ properties:
+ caFile:
+ description: The CA cert to use for the targets.
+ type: string
+ certFile:
+ description: The client cert file for the targets.
+ type: string
+ insecureSkipVerify:
+ description: Disable target certificate validation.
+ type: boolean
+ keyFile:
+ description: The client key file for the targets.
+ type: string
+ serverName:
+ description: Used to verify the hostname for the targets.
+ type: string
+ required:
+ - host
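+ # Illustrative apiserverConfig using the conventional in-cluster service
+ # account token and CA paths; shown only as an example, not a default:
+ #   apiserverConfig:
+ #     host: kubernetes.default.svc:443
+ #     bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+ #     tlsConfig:
+ #       caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt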
+ baseImage:
+ description: Base image to use for a Prometheus deployment.
+ type: string
+ configMaps:
+ description: ConfigMaps is a list of ConfigMaps in the same namespace
+ as the Prometheus object, which shall be mounted into the Prometheus
+ Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/<configmap-name>.
+ items:
+ type: string
+ type: array
+ containers:
+ description: Containers allows injecting additional containers. This
+ is meant to allow adding an authentication proxy to a Prometheus pod.
+ items:
+ description: A single application container that you want to run within
+ a pod.
+ properties:
+ args:
+ description: 'Arguments to the entrypoint. The docker image''s
+ CMD is used if this is not provided. Variable references $(VAR_NAME)
+ are expanded using the container''s environment. If a variable
+ cannot be resolved, the reference in the input string will be
+ unchanged. The $(VAR_NAME) syntax can be escaped with a double
+ $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
+ regardless of whether the variable exists or not. Cannot be
+ updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
+ items:
+ type: string
+ type: array
+ command:
+ description: 'Entrypoint array. Not executed within a shell. The
+ docker image''s ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container''s
+ environment. If a variable cannot be resolved, the reference
+ in the input string will be unchanged. The $(VAR_NAME) syntax
+ can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references
+ will never be expanded, regardless of whether the variable exists
+ or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
+ items:
+ type: string
+ type: array
+ env:
+ description: List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
+ C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previous defined environment variables in the
+ container and any service environment variables. If a
+ variable cannot be resolved, the reference in the input
+ string will be unchanged. The $(VAR_NAME) syntax can be
+ escaped with a double $$, ie: $$(VAR_NAME). Escaped references
+ will never be expanded, regardless of whether the variable
+ exists or not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: EnvVarSource represents a source for the value
+ of an EnvVar.
+ properties:
+ configMapKeyRef:
+ description: Selects a key from a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ fieldRef:
+ description: ObjectFieldSelector selects an APIVersioned
+ field of an object.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ resourceFieldRef:
+ description: ResourceFieldSelector represents container
+ resources (cpu, memory) and their output format
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor: {}
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ secretKeyRef:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ required:
+ - name
+ type: array
+ envFrom:
+ description: List of sources to populate environment variables
+ in the container. The keys defined within a source must be a
+ C_IDENTIFIER. All invalid keys will be reported as an event
+ when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take
+ precedence. Values defined by an Env with a duplicate key will
+ take precedence. Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of a set of
+ ConfigMaps
+ properties:
+ configMapRef:
+ description: |-
+ ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.
+
+ The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap must be defined
+ type: boolean
+ prefix:
+ description: An optional identifier to prepend to each key
+ in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: |-
+ SecretEnvSource selects a Secret to populate the environment variables with.
+
+ The contents of the target Secret's Data field will represent the key-value pairs as environment variables.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret must be defined
+ type: boolean
+ type: array
+ image:
+ description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management
+ to default or override container images in workload controllers
+ like Deployments and StatefulSets.'
+ type: string
+ imagePullPolicy:
+ description: 'Image pull policy. One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent
+ otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images'
+ type: string
+ lifecycle:
+ description: Lifecycle describes actions that the management system
+ should take in response to container lifecycle events. For the
+ PostStart and PreStop lifecycle handlers, management of the
+ container blocks until the action is complete, unless the container
+ process fails, in which case the handler is aborted.
+ properties:
+ postStart:
+ description: Handler defines a specific action that should
+ be taken
+ properties:
+ exec:
+ description: ExecAction describes a "run in container"
+ action.
+ properties:
+ command:
+ description: Command is the command line to execute
+ inside the container, the working directory for
+ the command is root ('/') in the container's filesystem.
+ The command is simply exec'd, it is not run inside
+ a shell, so traditional shell instructions ('|',
+ etc) won't work. To use a shell, you need to explicitly
+ call out to that shell. Exit status of 0 is treated
+ as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ httpGet:
+ description: HTTPGetAction describes an action based on
+ HTTP Get requests.
+ properties:
+ host:
+ description: Host name to connect to, defaults to
+ the pod IP. You probably want to set "Host" in httpHeaders
+ instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header
+ to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ scheme:
+ description: Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ tcpSocket:
+ description: TCPSocketAction describes an action based
+ on opening a socket
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults
+ to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ required:
+ - port
+ preStop:
+ description: Handler defines a specific action that should
+ be taken
+ properties:
+ exec:
+ description: ExecAction describes a "run in container"
+ action.
+ properties:
+ command:
+ description: Command is the command line to execute
+ inside the container, the working directory for
+ the command is root ('/') in the container's filesystem.
+ The command is simply exec'd, it is not run inside
+ a shell, so traditional shell instructions ('|',
+ etc) won't work. To use a shell, you need to explicitly
+ call out to that shell. Exit status of 0 is treated
+ as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ httpGet:
+ description: HTTPGetAction describes an action based on
+ HTTP Get requests.
+ properties:
+ host:
+ description: Host name to connect to, defaults to
+ the pod IP. You probably want to set "Host" in httpHeaders
+ instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header
+ to be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ scheme:
+ description: Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ tcpSocket:
+ description: TCPSocketAction describes an action based
+ on opening a socket
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults
+ to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ required:
+ - port
+ livenessProbe:
+ description: Probe describes a health check to be performed against
+ a container to determine whether it is alive or ready to receive
+ traffic.
+ properties:
+ exec:
+ description: ExecAction describes a "run in container" action.
+ properties:
+ command:
+ description: Command is the command line to execute inside
+ the container, the working directory for the command is
+ root ('/') in the container's filesystem. The command
+ is simply exec'd, it is not run inside a shell, so traditional
+ shell instructions ('|', etc) won't work. To use a shell,
+ you need to explicitly call out to that shell. Exit
+ status of 0 is treated as live/healthy and non-zero
+ is unhealthy.
+ items:
+ type: string
+ type: array
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to
+ be considered failed after having succeeded. Defaults to
+ 3. Minimum value is 1.
+ format: int32
+ type: integer
+ httpGet:
+ description: HTTPGetAction describes an action based on HTTP
+ Get requests.
+ properties:
+ host:
+ description: Host name to connect to, defaults to the
+ pod IP. You probably want to set "Host" in httpHeaders
+ instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP
+ allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to
+ be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ scheme:
+ description: Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ initialDelaySeconds:
+ description: 'Number of seconds after the container has started
+ before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the probe to
+ be considered successful after having failed. Defaults to
+ 1. Must be 1 for liveness. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocketAction describes an action based on
+ opening a socket
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults
+ to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ required:
+ - port
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe times
+ out. Defaults to 1 second. Minimum value is 1. More info:
+ https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ name:
+ description: Name of the container specified as a DNS_LABEL. Each
+ container in a pod must have a unique name (DNS_LABEL). Cannot
+ be updated.
+ type: string
+ ports:
+ description: List of ports to expose from the container. Exposing
+ a port here gives the system additional information about the
+ network connections a container uses, but is primarily informational.
+ Not specifying a port here DOES NOT prevent that port from being
+ exposed. Any port which is listening on the default "0.0.0.0"
+ address inside a container will be accessible from the network.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port in a single
+ container.
+ properties:
+ containerPort:
+ description: Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external port to.
+ type: string
+ hostPort:
+ description: Number of port to expose on the host. If specified,
+ this must be a valid port number, 0 < x < 65536. If HostNetwork
+ is specified, this must match ContainerPort. Most containers
+ do not need this.
+ format: int32
+ type: integer
+ name:
+ description: If specified, this must be an IANA_SVC_NAME
+ and unique within the pod. Each named port in a pod must
+ have a unique name. Name for the port that can be referred
+ to by services.
+ type: string
+ protocol:
+ description: Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: array
+ readinessProbe:
+ description: Probe describes a health check to be performed against
+ a container to determine whether it is alive or ready to receive
+ traffic.
+ properties:
+ exec:
+ description: ExecAction describes a "run in container" action.
+ properties:
+ command:
+ description: Command is the command line to execute inside
+ the container, the working directory for the command is
+ root ('/') in the container's filesystem. The command
+ is simply exec'd, it is not run inside a shell, so traditional
+ shell instructions ('|', etc) won't work. To use a shell,
+ you need to explicitly call out to that shell. Exit
+ status of 0 is treated as live/healthy and non-zero
+ is unhealthy.
+ items:
+ type: string
+ type: array
+ failureThreshold:
+ description: Minimum consecutive failures for the probe to
+ be considered failed after having succeeded. Defaults to
+ 3. Minimum value is 1.
+ format: int32
+ type: integer
+ httpGet:
+ description: HTTPGetAction describes an action based on HTTP
+ Get requests.
+ properties:
+ host:
+ description: Host name to connect to, defaults to the
+ pod IP. You probably want to set "Host" in httpHeaders
+ instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP
+ allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to
+ be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ scheme:
+ description: Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ initialDelaySeconds:
+ description: 'Number of seconds after the container has started
+ before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the probe to
+ be considered successful after having failed. Defaults to
+ 1. Must be 1 for liveness. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocketAction describes an action based on
+ opening a socket
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults
+ to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: string
+ - type: integer
+ required:
+ - port
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe times
+ out. Defaults to 1 second. Minimum value is 1. More info:
+ https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ securityContext:
+ description: SecurityContext holds security configuration that
+ will be applied to a container. Some fields are present in both
+ SecurityContext and PodSecurityContext. When both are set,
+ the values in SecurityContext take precedence.
+ properties:
+ allowPrivilegeEscalation:
+ description: 'AllowPrivilegeEscalation controls whether a
+ process can gain more privileges than its parent process.
+ This bool directly controls if the no_new_privs flag will
+ be set on the container process. AllowPrivilegeEscalation
+ is true always when the container is: 1) run as Privileged
+ 2) has CAP_SYS_ADMIN'
+ type: boolean
+ capabilities:
+ description: Adds and removes POSIX capabilities from running
+ containers.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ type: string
+ type: array
+ drop:
+ description: Removed capabilities
+ items:
+ type: string
+ type: array
+ privileged:
+ description: Run container in privileged mode. Processes in
+ privileged containers are essentially equivalent to root
+ on the host. Defaults to false.
+ type: boolean
+ procMount:
+ description: procMount denotes the type of proc mount to use
+ for the containers. The default is DefaultProcMount which
+ uses the container runtime defaults for readonly paths and
+ masked paths. This requires the ProcMountType feature flag
+ to be enabled.
+ type: string
+ readOnlyRootFilesystem:
+ description: Whether this container has a read-only root filesystem.
+ Default is false.
+ type: boolean
+ runAsGroup:
+ description: The GID to run the entrypoint of the container
+ process. Uses runtime default if unset. May also be set
+ in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext
+ takes precedence.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: Indicates that the container must run as a non-root
+ user. If true, the Kubelet will validate the image at runtime
+ to ensure that it does not run as UID 0 (root) and fail
+ to start the container if it does. If unset or false, no
+ such validation will be performed. May also be set in PodSecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the
+ value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: The UID to run the entrypoint of the container
+ process. Defaults to user specified in image metadata if
+ unspecified. May also be set in PodSecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the
+ value specified in SecurityContext takes precedence.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: SELinuxOptions are the labels to be applied to
+ the container
+ properties:
+ level:
+ description: Level is SELinux level label that applies
+ to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies
+ to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies
+ to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies
+ to the container.
+ type: string
+ stdin:
+ description: Whether this container should allocate a buffer for
+ stdin in the container runtime. If this is not set, reads from
+ stdin in the container will always result in EOF. Default is
+ false.
+ type: boolean
+ stdinOnce:
+ description: Whether the container runtime should close the stdin
+ channel after it has been opened by a single attach. When stdin
+ is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container
+ start, is empty until the first client attaches to stdin, and
+ then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container
+ is restarted. If this flag is false, a container process that
+ reads from stdin will never receive an EOF. Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: 'Optional: Path at which the file to which the container''s
+ termination message will be written is mounted into the container''s
+ filesystem. Message written is intended to be brief final status,
+ such as an assertion failure message. Will be truncated by the
+ node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb. Defaults to /dev/termination-log.
+ Cannot be updated.'
+ type: string
+ terminationMessagePolicy:
+ description: Indicate how the termination message should be populated.
+ File will use the contents of terminationMessagePath to populate
+ the container status message on both success and failure. FallbackToLogsOnError
+ will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever
+ is smaller. Defaults to File. Cannot be updated.
+ type: string
+ tty:
+ description: Whether this container should allocate a TTY for
+ itself, also requires 'stdin' to be true. Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices to be
+ used by the container. This is an alpha feature and may change
+ in the future.
+ items:
+ description: volumeDevice describes a mapping of a raw block
+ device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of the container
+ that the device will be mapped to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - name
+ - devicePath
+ type: array
+ volumeMounts:
+ description: Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a Volume within
+ a container.
+ properties:
+ mountPath:
+ description: Path within the container at which the volume
+ should be mounted. Must not contain ':'.
+ type: string
+ mountPropagation:
+ description: mountPropagation determines how mounts are
+ propagated from the host to container and the other way
+ around. When not set, MountPropagationNone is used. This
+ field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: Mounted read-only if true, read-write otherwise
+ (false or unspecified). Defaults to false.
+ type: boolean
+ subPath:
+ description: Path within the volume from which the container's
+ volume should be mounted. Defaults to "" (volume's root).
+ type: string
+ required:
+ - name
+ - mountPath
+ type: array
+ workingDir:
+ description: Container's working directory. If not specified,
+ the container runtime's default will be used, which might be
+ configured in the container image. Cannot be updated.
+ type: string
+ required:
+ - name
+ type: array
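+ # Illustrative use of the containers field above to inject an authentication
+ # proxy sidecar; the container name and image are placeholders:
+ #   containers:
+ #   - name: auth-proxy
+ #     image: example.com/oauth-proxy:latest
+ #     ports:
+ #     - name: proxy
+ #       containerPort: 3000
+ #     resources:
+ #       requests:
+ #         cpu: 10m
+ #         memory: 32Mi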
+ evaluationInterval:
+ description: Interval between consecutive evaluations.
+ type: string
+ externalLabels:
+ description: The labels to add to any time series or alerts when communicating
+ with external systems (federation, remote storage, Alertmanager).
+ type: object
+ externalUrl:
+ description: The external URL the Prometheus instances will be available
+ under. This is necessary to generate correct URLs. This is necessary
+ if Prometheus is not served from root of a DNS name.
+ type: string
+ imagePullSecrets:
+ description: An optional list of references to secrets in the same namespace
+ to use for pulling prometheus and alertmanager images from registries
+ see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: LocalObjectReference contains enough information to let
+ you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ type: array
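+ # Illustrative imagePullSecrets entry; "regcred" is an assumed Secret name
+ # holding registry credentials in the Prometheus namespace:
+ #   imagePullSecrets:
+ #   - name: regcred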
+ listenLocal:
+ description: ListenLocal makes the Prometheus server listen on loopback,
+ so that it does not bind against the Pod IP.
+ type: boolean
+ logLevel:
+ description: Log level for Prometheus to be configured with.
+ type: string
+ nodeSelector:
+ description: Define which Nodes the Pods are scheduled on.
+ type: object
+ paused:
+ description: When a Prometheus deployment is paused, no actions except
+ for deletion will be performed on the underlying objects.
+ type: boolean
+ podMetadata:
+ description: ObjectMeta is metadata that all persisted resources must
+ have, which includes all objects users must create.
+ properties:
+ annotations:
+ description: 'Annotations is an unstructured key value map stored
+ with a resource that may be set by external tools to store and
+ retrieve arbitrary metadata. They are not queryable and should
+ be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
+ type: object
+ clusterName:
+ description: The name of the cluster which the object belongs to.
+ This is used to distinguish resources with same name and namespace
+ in different clusters. This field is not set anywhere right now
+ and apiserver is going to ignore it if set in create or update
+ request.
+ type: string
+ creationTimestamp:
+ description: Time is a wrapper around time.Time which supports correct
+ marshaling to YAML and JSON. Wrappers are provided for many of
+ the factory methods that the time package offers.
+ format: date-time
+ type: string
+ deletionGracePeriodSeconds:
+ description: Number of seconds allowed for this object to gracefully
+ terminate before it will be removed from the system. Only set
+ when deletionTimestamp is also set. May only be shortened. Read-only.
+ format: int64
+ type: integer
+ deletionTimestamp:
+ description: Time is a wrapper around time.Time which supports correct
+ marshaling to YAML and JSON. Wrappers are provided for many of
+ the factory methods that the time package offers.
+ format: date-time
+ type: string
+ finalizers:
+ description: Must be empty before the object is deleted from the
+ registry. Each entry is an identifier for the responsible component
+ that will remove the entry from the list. If the deletionTimestamp
+ of the object is non-nil, entries in this list can only be removed.
+ items:
+ type: string
+ type: array
+ generateName:
+ description: |-
+ GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.
+
+ If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).
+
+ Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
+ type: string
+ generation:
+ description: A sequence number representing a specific generation
+ of the desired state. Populated by the system. Read-only.
+ format: int64
+ type: integer
+ initializers:
+ description: Initializers tracks the progress of initialization.
+ properties:
+ pending:
+ description: Pending is a list of initializers that must execute
+ in order before this object is visible. When the last pending
+ initializer is removed, and no failing result is set, the
+ initializers struct will be set to nil and the object is considered
+ as initialized and visible to all clients.
+ items:
+ description: Initializer is information about an initializer
+ that has not yet completed.
+ properties:
+ name:
+ description: name of the process that is responsible for
+ initializing this object.
+ type: string
+ required:
+ - name
+ type: array
+ result:
+ description: Status is a return value for calls that don't return
+ other objects.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of
+ this representation of an object. Servers should convert
+ recognized schemas to the latest internal value, and may
+ reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ code:
+ description: Suggested HTTP return code for this status,
+ 0 if not set.
+ format: int32
+ type: integer
+ details:
+ description: StatusDetails is a set of additional properties
+ that MAY be set by the server to provide additional information
+ about a response. The Reason field of a Status object
+ defines what attributes will be set. Clients must ignore
+ fields that do not match the defined type of each attribute,
+ and should assume that any attribute may be empty, invalid,
+ or under defined.
+ properties:
+ causes:
+ description: The Causes array includes more details
+ associated with the StatusReason failure. Not all
+ StatusReasons may provide detailed causes.
+ items:
+ description: StatusCause provides more information
+ about an api.Status failure, including cases when
+ multiple errors are encountered.
+ properties:
+ field:
+ description: |-
+ The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.
+
+ Examples:
+ "name" - the field "name" on the current resource
+ "items[0].name" - the field "name" on the first array entry in "items"
+ type: string
+ message:
+ description: A human-readable description of the
+ cause of the error. This field may be presented
+ as-is to a reader.
+ type: string
+ reason:
+ description: A machine-readable description of
+ the cause of the error. If this value is empty
+ there is no information available.
+ type: string
+ type: array
+ group:
+ description: The group attribute of the resource associated
+ with the status StatusReason.
+ type: string
+ kind:
+ description: 'The kind attribute of the resource associated
+ with the status StatusReason. On some operations may
+ differ from the requested resource Kind. More info:
+ https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: The name attribute of the resource associated
+ with the status StatusReason (when there is a single
+ name which can be described).
+ type: string
+ retryAfterSeconds:
+ description: If specified, the time in seconds before
+ the operation should be retried. Some errors may indicate
+ the client must take an alternate action - for those
+ errors this field may indicate how long to wait before
+ taking the alternate action.
+ format: int32
+ type: integer
+ uid:
+ description: 'UID of the resource. (when there is a
+ single resource which can be described). More info:
+ http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST
+ resource this object represents. Servers may infer this
+ from the endpoint the client submits requests to. Cannot
+ be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ message:
+ description: A human-readable description of the status
+ of this operation.
+ type: string
+ metadata:
+ description: ListMeta describes metadata that synthetic
+ resources must have, including lists and various status
+ objects. A resource may have only one of {ObjectMeta,
+ ListMeta}.
+ properties:
+ continue:
+ description: continue may be set if the user set a limit
+ on the number of items returned, and indicates that
+ the server has more data available. The value is opaque
+ and may be used to issue another request to the endpoint
+ that served this list to retrieve the next set of
+ available objects. Continuing a consistent list may
+ not be possible if the server configuration has changed
+ or more than a few minutes have passed. The resourceVersion
+ field returned when using this continue value will
+ be identical to the value in the first response, unless
+ you have received this token from an error message.
+ type: string
+ resourceVersion:
+ description: 'String that identifies the server''s internal
+ version of this object that can be used by clients
+ to determine when objects have changed. Value must
+ be treated as opaque by clients and passed unmodified
+ back to the server. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ selfLink:
+ description: selfLink is a URL representing this object.
+ Populated by the system. Read-only.
+ type: string
+ reason:
+ description: A machine-readable description of why this
+ operation is in the "Failure" status. If this value is
+ empty there is no information available. A Reason clarifies
+ an HTTP status code but does not override it.
+ type: string
+ status:
+ description: 'Status of the operation. One of: "Success"
+ or "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
+ type: string
+ required:
+ - pending
+ labels:
+ description: 'Map of string keys and values that can be used to
+ organize and categorize (scope and select) objects. May match
+ selectors of replication controllers and services. More info:
+ http://kubernetes.io/docs/user-guide/labels'
+ type: object
+ name:
+ description: 'Name must be unique within a namespace. Is required
+ when creating resources, although some resources may allow a client
+ to request the generation of an appropriate name automatically.
+ Name is primarily intended for creation idempotence and configuration
+ definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ namespace:
+ description: |-
+ Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.
+
+ Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces
+ type: string
+ ownerReferences:
+ description: List of objects depended by this object. If ALL objects
+ in the list have been deleted, this object will be garbage collected.
+ If this object is managed by a controller, then an entry in this
+ list will point to this controller, with the controller field
+ set to true. There cannot be more than one managing controller.
+ items:
+ description: OwnerReference contains enough information to let
+ you identify an owning object. Currently, an owning object must
+ be in the same namespace, so there is no namespace field.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ blockOwnerDeletion:
+ description: If true, AND if the owner has the "foregroundDeletion"
+ finalizer, then the owner cannot be deleted from the key-value
+ store until this reference is removed. Defaults to false.
+ To set this field, a user needs "delete" permission of the
+ owner, otherwise 422 (Unprocessable Entity) will be returned.
+ type: boolean
+ controller:
+ description: If true, this reference points to the managing
+ controller.
+ type: boolean
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ required:
+ - apiVersion
+ - kind
+ - name
+ - uid
+ type: array
+ resourceVersion:
+ description: |-
+ An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.
+
+ Populated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ selfLink:
+ description: SelfLink is a URL representing this object. Populated
+ by the system. Read-only.
+ type: string
+ uid:
+ description: |-
+ UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.
+
+ Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+ type: string
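+ # Illustrative podMetadata adding labels and annotations to the Prometheus
+ # pods; the key/value pairs below are placeholders:
+ #   podMetadata:
+ #     labels:
+ #       team: platform
+ #     annotations:
+ #       example.com/owner: daaas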
+ priorityClassName:
+ description: Priority class assigned to the Pods
+ type: string
+ remoteRead:
+ description: If specified, the remote_read spec. This is an experimental
+ feature, it may change in any upcoming release in a breaking way.
+ items:
+ description: RemoteReadSpec defines the remote_read configuration
+ for prometheus.
+ properties:
+ basicAuth:
+ description: 'BasicAuth allow an endpoint to authenticate over
+ basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints'
+ properties:
+ password:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ username:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ bearerToken:
+ description: Bearer token for remote read.
+ type: string
+ bearerTokenFile:
+ description: File to read bearer token for remote read.
+ type: string
+ proxyUrl:
+ description: Optional ProxyURL
+ type: string
+ readRecent:
+ description: Whether reads should be made for queries for time
+ ranges that the local storage should have complete data for.
+ type: boolean
+ remoteTimeout:
+ description: Timeout for requests to the remote read endpoint.
+ type: string
+ requiredMatchers:
+ description: An optional list of equality matchers which have
+ to be present in a selector to query the remote read endpoint.
+ type: object
+ tlsConfig:
+ description: TLSConfig specifies TLS configuration parameters.
+ properties:
+ caFile:
+ description: The CA cert to use for the targets.
+ type: string
+ certFile:
+ description: The client cert file for the targets.
+ type: string
+ insecureSkipVerify:
+ description: Disable target certificate validation.
+ type: boolean
+ keyFile:
+ description: The client key file for the targets.
+ type: string
+ serverName:
+ description: Used to verify the hostname for the targets.
+ type: string
+ url:
+ description: The URL of the endpoint to send samples to.
+ type: string
+ required:
+ - url
+ type: array
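+ # Illustrative remoteRead entry; the endpoint URL is an assumed in-cluster
+ # service, and only "url" is required by the schema above:
+ #   remoteRead:
+ #   - url: http://cortex.example.svc:9009/api/prom/read
+ #     readRecent: false
+ #     remoteTimeout: 30s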
+ remoteWrite:
+ description: If specified, the remote_write spec. This is an experimental
+ feature, it may change in any upcoming release in a breaking way.
+ items:
+ description: RemoteWriteSpec defines the remote_write configuration
+ for prometheus.
+ properties:
+ basicAuth:
+ description: 'BasicAuth allow an endpoint to authenticate over
+ basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints'
+ properties:
+ password:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ username:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ bearerToken:
+ description: Bearer token for remote write.
+ type: string
+ bearerTokenFile:
+ description: File to read bearer token for remote write.
+ type: string
+ proxyUrl:
+ description: Optional ProxyURL
+ type: string
+ queueConfig:
+ description: QueueConfig allows the tuning of remote_write queue_config
+ parameters. This object is referenced in the RemoteWriteSpec
+ object.
+ properties:
+ batchSendDeadline:
+ description: BatchSendDeadline is the maximum time a sample
+ will wait in buffer.
+ type: string
+ capacity:
+ description: Capacity is the number of samples to buffer per
+ shard before we start dropping them.
+ format: int32
+ type: integer
+ maxBackoff:
+ description: MaxBackoff is the maximum retry delay.
+ type: string
+ maxRetries:
+ description: MaxRetries is the maximum number of times to
+ retry a batch on recoverable errors.
+ format: int32
+ type: integer
+ maxSamplesPerSend:
+ description: MaxSamplesPerSend is the maximum number of samples
+ per send.
+ format: int32
+ type: integer
+ maxShards:
+ description: MaxShards is the maximum number of shards, i.e.
+ amount of concurrency.
+ format: int32
+ type: integer
+ minBackoff:
+ description: MinBackoff is the initial retry delay. Gets doubled
+ for every retry.
+ type: string
+ remoteTimeout:
+ description: Timeout for requests to the remote write endpoint.
+ type: string
+ tlsConfig:
+ description: TLSConfig specifies TLS configuration parameters.
+ properties:
+ caFile:
+ description: The CA cert to use for the targets.
+ type: string
+ certFile:
+ description: The client cert file for the targets.
+ type: string
+ insecureSkipVerify:
+ description: Disable target certificate validation.
+ type: boolean
+ keyFile:
+ description: The client key file for the targets.
+ type: string
+ serverName:
+ description: Used to verify the hostname for the targets.
+ type: string
+ url:
+ description: The URL of the endpoint to send samples to.
+ type: string
+ writeRelabelConfigs:
+ description: The list of remote write relabel configurations.
+ items:
+ description: 'RelabelConfig allows dynamic rewriting of the
+ label set, being applied to samples before ingestion. It defines
+ `<metric_relabel_configs>`-section of Prometheus configuration.
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
+ properties:
+ action:
+ description: Action to perform based on regex matching.
+ Default is 'replace'
+ type: string
+ modulus:
+ description: Modulus to take of the hash of the source label
+ values.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched. Default is '(.*)'.
+ type: string
+ replacement:
+ description: Replacement value against which a regex replace
+ is performed if the regular expression matches. Regex
+ capture groups are available. Default is '$1'
+ type: string
+ separator:
+ description: Separator placed between concatenated source
+ label values. Default is ';'.
+ type: string
+ sourceLabels:
+ description: The source labels select values from existing
+ labels. Their content is concatenated using the configured
+ separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
+ items:
+ type: string
+ type: array
+ targetLabel:
+ description: Label to which the resulting value is written
+ in a replace action. It is mandatory for replace actions.
+ Regex capture groups are available.
+ type: string
+ type: array
+ required:
+ - url
+ type: array
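+ # Illustrative remoteWrite entry with queue tuning and a relabel rule that
+ # drops Go runtime metrics before sending; the URL and figures are assumptions:
+ #   remoteWrite:
+ #   - url: http://cortex.example.svc:9009/api/prom/push
+ #     queueConfig:
+ #       capacity: 10000
+ #       maxShards: 30
+ #       maxSamplesPerSend: 500
+ #     writeRelabelConfigs:
+ #     - sourceLabels: [__name__]
+ #       regex: go_.*
+ #       action: drop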
+ replicas:
+ description: Number of instances to deploy for a Prometheus deployment.
+ format: int32
+ type: integer
+ resources:
+ description: ResourceRequirements describes the compute resource requirements.
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of compute resources
+ allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ description: 'Requests describes the minimum amount of compute resources
+ required. If Requests is omitted for a container, it defaults
+ to Limits if that is explicitly specified, otherwise to an implementation-defined
+ value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
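+ # Illustrative resources block for the Prometheus container; the figures
+ # below are placeholders to be sized per deployment:
+ #   resources:
+ #     requests:
+ #       cpu: 500m
+ #       memory: 2Gi
+ #     limits:
+ #       memory: 4Gi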
+ retention:
+ description: Time duration Prometheus shall retain data for. Default
+ is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)`
+ (milliseconds seconds minutes hours days weeks years).
+ type: string
+ routePrefix:
+ description: The route prefix Prometheus registers HTTP handlers for.
+ This is useful if using ExternalURL and a proxy is rewriting HTTP
+ routes of a request, and the actual ExternalURL is still true, but
+ the server serves requests under a different route prefix. For example
+ for use with `kubectl proxy`.
+ type: string
+ ruleNamespaceSelector:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains
+ values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a
+ set of values. Valid operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator
+ is In or NotIn, the values array must be non-empty. If the
+ operator is Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator is
+ "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ ruleSelector:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains
+ values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a
+ set of values. Valid operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator
+ is In or NotIn, the values array must be non-empty. If the
+ operator is Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator is
+ "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ scrapeInterval:
+ description: Interval between consecutive scrapes.
+ type: string
+ secrets:
+ description: Secrets is a list of Secrets in the same namespace as the
+ Prometheus object, which shall be mounted into the Prometheus Pods.
+ The Secrets are mounted into /etc/prometheus/secrets/<secret-name>.
+ items:
+ type: string
+ type: array
+ securityContext:
+ description: PodSecurityContext holds pod-level security attributes
+ and common container settings. Some fields are also present in container.securityContext. Field
+ values of container.securityContext take precedence over field values
+ of PodSecurityContext.
+ properties:
+ fsGroup:
+ description: |-
+ A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
+
+ 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----
+
+ If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ format: int64
+ type: integer
+ runAsGroup:
+ description: The GID to run the entrypoint of the container process.
+ Uses runtime default if unset. May also be set in SecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the value
+ specified in SecurityContext takes precedence for that container.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: Indicates that the container must run as a non-root
+ user. If true, the Kubelet will validate the image at runtime
+ to ensure that it does not run as UID 0 (root) and fail to start
+ the container if it does. If unset or false, no such validation
+ will be performed. May also be set in SecurityContext. If set
+ in both SecurityContext and PodSecurityContext, the value specified
+ in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified. May
+ also be set in SecurityContext. If set in both SecurityContext
+ and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: SELinuxOptions are the labels to be applied to the
+ container
+ properties:
+ level:
+ description: Level is SELinux level label that applies to the
+ container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies to the
+ container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies to the
+ container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies to the
+ container.
+ type: string
+ supplementalGroups:
+ description: A list of groups applied to the first process run in
+ each container, in addition to the container's primary GID. If
+ unspecified, no groups will be added to any container.
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ description: Sysctls hold a list of namespaced sysctls used for
+ the pod. Pods with unsupported sysctls (by the container runtime)
+ might fail to launch.
+ items:
+ description: Sysctl defines a kernel parameter to be set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: array
+ serviceAccountName:
+ description: ServiceAccountName is the name of the ServiceAccount to
+ use to run the Prometheus Pods.
+ type: string
+ serviceMonitorNamespaceSelector:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains
+ values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a
+ set of values. Valid operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator
+ is In or NotIn, the values array must be non-empty. If the
+ operator is Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator is
+ "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ serviceMonitorSelector:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains
+ values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a
+ set of values. Valid operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator
+ is In or NotIn, the values array must be non-empty. If the
+ operator is Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator is
+ "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ sha:
+ description: SHA of Prometheus container image to be deployed. Defaults
+ to the value of `version`. Similar to a tag, but the SHA explicitly
+ deploys an immutable container image. Version and Tag are ignored
+ if SHA is set.
+ type: string
+ storage:
+ description: StorageSpec defines the configured storage for a group
+ of Prometheus servers. If neither `emptyDir` nor `volumeClaimTemplate`
+ is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir)
+ will be used.
+ properties:
+ class:
+ description: 'Name of the StorageClass to use when requesting storage
+ provisioning. More info: https://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses
+ (DEPRECATED - instead use `volumeClaimTemplate.spec.storageClassName`)'
+ type: string
+ emptyDir:
+ description: Represents an empty directory for a pod. Empty directory
+ volumes support ownership management and SELinux relabeling.
+ properties:
+ medium:
+ description: 'What type of storage medium should back this directory.
+ The default is "" which means to use the node''s default medium.
+ Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
+ type: string
+ sizeLimit: {}
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ selector:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that
+ contains values, a key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ volumeClaimTemplate:
+ description: PersistentVolumeClaim is a user's request for and claim
+ to a persistent volume
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this
+ representation of an object. Servers should convert recognized
+ schemas to the latest internal value, and may reject unrecognized
+ values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource
+ this object represents. Servers may infer this from the endpoint
+ the client submits requests to. Cannot be updated. In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ description: ObjectMeta is metadata that all persisted resources
+ must have, which includes all objects users must create.
+ properties:
+ annotations:
+ description: 'Annotations is an unstructured key value map
+ stored with a resource that may be set by external tools
+ to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations'
+ type: object
+ clusterName:
+ description: The name of the cluster which the object belongs
+ to. This is used to distinguish resources with same name
+ and namespace in different clusters. This field is not
+ set anywhere right now and apiserver is going to ignore
+ it if set in create or update request.
+ type: string
+ creationTimestamp:
+ description: Time is a wrapper around time.Time which supports
+ correct marshaling to YAML and JSON. Wrappers are provided
+ for many of the factory methods that the time package
+ offers.
+ format: date-time
+ type: string
+ deletionGracePeriodSeconds:
+ description: Number of seconds allowed for this object to
+ gracefully terminate before it will be removed from the
+ system. Only set when deletionTimestamp is also set. May
+ only be shortened. Read-only.
+ format: int64
+ type: integer
+ deletionTimestamp:
+ description: Time is a wrapper around time.Time which supports
+ correct marshaling to YAML and JSON. Wrappers are provided
+ for many of the factory methods that the time package
+ offers.
+ format: date-time
+ type: string
+ finalizers:
+ description: Must be empty before the object is deleted
+ from the registry. Each entry is an identifier for the
+ responsible component that will remove the entry from
+ the list. If the deletionTimestamp of the object is non-nil,
+ entries in this list can only be removed.
+ items:
+ type: string
+ type: array
+ generateName:
+ description: |-
+ GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.
+
+ If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).
+
+ Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
+ type: string
+ generation:
+ description: A sequence number representing a specific generation
+ of the desired state. Populated by the system. Read-only.
+ format: int64
+ type: integer
+ initializers:
+ description: Initializers tracks the progress of initialization.
+ properties:
+ pending:
+ description: Pending is a list of initializers that
+ must execute in order before this object is visible.
+ When the last pending initializer is removed, and
+ no failing result is set, the initializers struct
+ will be set to nil and the object is considered as
+ initialized and visible to all clients.
+ items:
+ description: Initializer is information about an initializer
+ that has not yet completed.
+ properties:
+ name:
+ description: name of the process that is responsible
+ for initializing this object.
+ type: string
+ required:
+ - name
+ type: array
+ result:
+ description: Status is a return value for calls that
+ don't return other objects.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema
+ of this representation of an object. Servers should
+ convert recognized schemas to the latest internal
+ value, and may reject unrecognized values. More
+ info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ code:
+ description: Suggested HTTP return code for this
+ status, 0 if not set.
+ format: int32
+ type: integer
+ details:
+ description: StatusDetails is a set of additional
+ properties that MAY be set by the server to provide
+ additional information about a response. The Reason
+ field of a Status object defines what attributes
+ will be set. Clients must ignore fields that do
+ not match the defined type of each attribute,
+ and should assume that any attribute may be empty,
+ invalid, or under defined.
+ properties:
+ causes:
+ description: The Causes array includes more
+ details associated with the StatusReason failure.
+ Not all StatusReasons may provide detailed
+ causes.
+ items:
+ description: StatusCause provides more information
+ about an api.Status failure, including cases
+ when multiple errors are encountered.
+ properties:
+ field:
+ description: |-
+ The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.
+
+ Examples:
+ "name" - the field "name" on the current resource
+ "items[0].name" - the field "name" on the first array entry in "items"
+ type: string
+ message:
+ description: A human-readable description
+ of the cause of the error. This field
+ may be presented as-is to a reader.
+ type: string
+ reason:
+ description: A machine-readable description
+ of the cause of the error. If this value
+ is empty there is no information available.
+ type: string
+ type: array
+ group:
+ description: The group attribute of the resource
+ associated with the status StatusReason.
+ type: string
+ kind:
+ description: 'The kind attribute of the resource
+ associated with the status StatusReason. On
+ some operations may differ from the requested
+ resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: The name attribute of the resource
+ associated with the status StatusReason (when
+ there is a single name which can be described).
+ type: string
+ retryAfterSeconds:
+ description: If specified, the time in seconds
+ before the operation should be retried. Some
+ errors may indicate the client must take an
+ alternate action - for those errors this field
+ may indicate how long to wait before taking
+ the alternate action.
+ format: int32
+ type: integer
+ uid:
+ description: 'UID of the resource. (when there
+ is a single resource which can be described).
+ More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ kind:
+ description: 'Kind is a string value representing
+ the REST resource this object represents. Servers
+ may infer this from the endpoint the client submits
+ requests to. Cannot be updated. In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ message:
+ description: A human-readable description of the
+ status of this operation.
+ type: string
+ metadata:
+ description: ListMeta describes metadata that synthetic
+ resources must have, including lists and various
+ status objects. A resource may have only one of
+ {ObjectMeta, ListMeta}.
+ properties:
+ continue:
+ description: continue may be set if the user
+ set a limit on the number of items returned,
+ and indicates that the server has more data
+ available. The value is opaque and may be
+ used to issue another request to the endpoint
+ that served this list to retrieve the next
+ set of available objects. Continuing a consistent
+ list may not be possible if the server configuration
+ has changed or more than a few minutes have
+ passed. The resourceVersion field returned
+ when using this continue value will be identical
+ to the value in the first response, unless
+ you have received this token from an error
+ message.
+ type: string
+ resourceVersion:
+ description: 'String that identifies the server''s
+ internal version of this object that can be
+ used by clients to determine when objects
+ have changed. Value must be treated as opaque
+ by clients and passed unmodified back to the
+ server. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ selfLink:
+ description: selfLink is a URL representing
+ this object. Populated by the system. Read-only.
+ type: string
+ reason:
+ description: A machine-readable description of why
+ this operation is in the "Failure" status. If
+ this value is empty there is no information available.
+ A Reason clarifies an HTTP status code but does
+ not override it.
+ type: string
+ status:
+ description: 'Status of the operation. One of: "Success"
+ or "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
+ type: string
+ required:
+ - pending
+ labels:
+ description: 'Map of string keys and values that can be
+ used to organize and categorize (scope and select) objects.
+ May match selectors of replication controllers and services.
+ More info: http://kubernetes.io/docs/user-guide/labels'
+ type: object
+ name:
+ description: 'Name must be unique within a namespace. Is
+ required when creating resources, although some resources
+ may allow a client to request the generation of an appropriate
+ name automatically. Name is primarily intended for creation
+ idempotence and configuration definition. Cannot be updated.
+ More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ namespace:
+ description: |-
+ Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.
+
+ Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces
+ type: string
+ ownerReferences:
+ description: List of objects depended by this object. If
+ ALL objects in the list have been deleted, this object
+ will be garbage collected. If this object is managed by
+ a controller, then an entry in this list will point to
+ this controller, with the controller field set to true.
+ There cannot be more than one managing controller.
+ items:
+ description: OwnerReference contains enough information
+ to let you identify an owning object. Currently, an
+ owning object must be in the same namespace, so there
+ is no namespace field.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ blockOwnerDeletion:
+ description: If true, AND if the owner has the "foregroundDeletion"
+ finalizer, then the owner cannot be deleted from
+ the key-value store until this reference is removed.
+ Defaults to false. To set this field, a user needs
+ "delete" permission of the owner, otherwise 422
+ (Unprocessable Entity) will be returned.
+ type: boolean
+ controller:
+ description: If true, this reference points to the
+ managing controller.
+ type: boolean
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ required:
+ - apiVersion
+ - kind
+ - name
+ - uid
+ type: array
+ resourceVersion:
+ description: |-
+ An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.
+
+ Populated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ selfLink:
+ description: SelfLink is a URL representing this object.
+ Populated by the system. Read-only.
+ type: string
+ uid:
+ description: |-
+ UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.
+
+ Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+ type: string
+ spec:
+ description: PersistentVolumeClaimSpec describes the common
+ attributes of storage devices and allows a Source for provider-specific
+ attributes
+ properties:
+ accessModes:
+ description: 'AccessModes contains the desired access modes
+ the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: TypedLocalObjectReference contains enough information
+ to let you locate the typed referenced object inside the
+ same namespace.
+ properties:
+ apiGroup:
+ description: APIGroup is the group for the resource
+ being referenced. If APIGroup is not specified, the
+ specified Kind must be in the core API group. For
+ any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ resources:
+ description: ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of
+ compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ description: 'Requests describes the minimum amount
+ of compute resources required. If Requests is omitted
+ for a container, it defaults to Limits if that is
+ explicitly specified, otherwise to an implementation-defined
+ value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ selector:
+ description: A label selector is a label query over a set
+ of resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects.
+ A null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be empty.
+ This array is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field
+ is "key", the operator is "In", and the values array
+ contains only "value". The requirements are ANDed.
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required
+ by the claim. Value of Filesystem is implied when not
+ included in claim spec. This is an alpha feature and may
+ change in the future.
+ type: string
+ volumeName:
+ description: VolumeName is the binding reference to the
+ PersistentVolume backing this claim.
+ type: string
+ status:
+ description: PersistentVolumeClaimStatus is the current status
+ of a persistent volume claim.
+ properties:
+ accessModes:
+ description: 'AccessModes contains the actual access modes
+ the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ capacity:
+ description: Represents the actual resources of the underlying
+ volume.
+ type: object
+ conditions:
+ description: Current Condition of persistent volume claim.
+ If underlying persistent volume is being resized then
+ the Condition will be set to 'ResizeStarted'.
+ items:
+ description: PersistentVolumeClaimCondition contains details
+ about the state of a PVC
+ properties:
+ lastProbeTime:
+ description: Time is a wrapper around time.Time which
+ supports correct marshaling to YAML and JSON. Wrappers
+ are provided for many of the factory methods that
+ the time package offers.
+ format: date-time
+ type: string
+ lastTransitionTime:
+ description: Time is a wrapper around time.Time which
+ supports correct marshaling to YAML and JSON. Wrappers
+ are provided for many of the factory methods that
+ the time package offers.
+ format: date-time
+ type: string
+ message:
+ description: Human-readable message indicating details
+ about last transition.
+ type: string
+ reason:
+ description: Unique, this should be a short, machine
+ understandable string that gives the reason for
+ condition's last transition. If it reports "ResizeStarted"
+ that means the underlying persistent volume is being
+ resized.
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ - status
+ type: array
+ phase:
+ description: Phase represents the current phase of PersistentVolumeClaim.
+ type: string
+ tag:
+ description: Tag of Prometheus container image to be deployed. Defaults
+ to the value of `version`. Version is ignored if Tag is set.
+ type: string
+ thanos:
+ description: ThanosSpec defines parameters for a Prometheus server within
+ a Thanos deployment.
+ properties:
+ baseImage:
+ description: Thanos base image if other than default.
+ type: string
+ gcs:
+ description: ThanosGCSSpec defines parameters for use of Google
+ Cloud Storage (GCS) with Thanos.
+ properties:
+ bucket:
+ description: Google Cloud Storage bucket name for stored blocks.
+ If empty it won't store any block inside Google Cloud Storage.
+ type: string
+ credentials:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ peers:
+ description: Peers is a DNS name for Thanos to discover peers through.
+ type: string
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ s3:
+ description: ThanosS3Spec defines parameters for use of AWS Simple Storage
+ Service (S3) with Thanos. (S3 compatible services apply as well)
+ properties:
+ accessKey:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ bucket:
+ description: S3-Compatible API bucket name for stored blocks.
+ type: string
+ encryptsse:
+ description: Whether to use Server Side Encryption
+ type: boolean
+ endpoint:
+ description: S3-Compatible API endpoint for stored blocks.
+ type: string
+ insecure:
+ description: Whether to use an insecure connection with an S3-Compatible
+ API.
+ type: boolean
+ secretKey:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ signatureVersion2:
+ description: Whether to use S3 Signature Version 2; otherwise
+ Signature Version 4 will be used.
+ type: boolean
+ sha:
+ description: SHA of Thanos container image to be deployed. Defaults
+ to the value of `version`. Similar to a tag, but the SHA explicitly
+ deploys an immutable container image. Version and Tag are ignored
+ if SHA is set.
+ type: string
+ tag:
+ description: Tag of Thanos sidecar container image to be deployed.
+ Defaults to the value of `version`. Version is ignored if Tag
+ is set.
+ type: string
+ version:
+ description: Version describes the version of Thanos to use.
+ type: string
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: The pod this Toleration is attached to tolerates any
+ taint that matches the triple <key,value,effect> using the matching
+ operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty, operator
+ must be Exists; this combination means to match all values and
+ all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal. Exists
+ is equivalent to wildcard for value, so that a pod can tolerate
+ all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the
+ toleration (which must be of effect NoExecute, otherwise this
+ field is ignored) tolerates the taint. By default, it is not
+ set, which means tolerate the taint forever (do not evict).
+ Zero and negative values will be treated as 0 (evict immediately)
+ by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise
+ just a regular string.
+ type: string
+ type: array
+ version:
+ description: Version of Prometheus to be deployed.
+ type: string
+ status:
+ description: 'PrometheusStatus is the most recent observed status of the
+ Prometheus cluster. Read-only. Not included when requesting from the apiserver,
+ only from the Prometheus Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status'
+ properties:
+ availableReplicas:
+ description: Total number of available pods (ready for at least minReadySeconds)
+ targeted by this Prometheus deployment.
+ format: int32
+ type: integer
+ paused:
+ description: Represents whether any actions on the underlying managed
+ objects are being performed. Only delete actions will be performed.
+ type: boolean
+ replicas:
+ description: Total number of non-terminated pods targeted by this Prometheus
+ deployment (their labels match the selector).
+ format: int32
+ type: integer
+ unavailableReplicas:
+ description: Total number of unavailable pods targeted by this Prometheus
+ deployment.
+ format: int32
+ type: integer
+ updatedReplicas:
+ description: Total number of non-terminated pods targeted by this Prometheus
+ deployment that have the desired version spec.
+ format: int32
+ type: integer
+ required:
+ - paused
+ - replicas
+ - updatedReplicas
+ - availableReplicas
+ - unavailableReplicas
+ version: v1
+{{- end }}
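The CRD above only validates the schema; for orientation, the sketch below is a minimal, hypothetical Prometheus custom resource that exercises a few of the validated fields (replicas, retention, serviceMonitorSelector, resources, storage.volumeClaimTemplate). It is not part of the chart, and every concrete name in it (namespace, selector label, ServiceAccount, StorageClass) is an assumption for illustration only.

apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: example                      # hypothetical name
  namespace: monitoring              # assumed namespace
spec:
  replicas: 2                        # spec.replicas, format int32
  retention: 24h                     # must match [0-9]+(ms|s|m|h|d|w|y)
  serviceAccountName: prometheus     # assumed ServiceAccount
  serviceMonitorSelector:
    matchLabels:
      release: example               # assumed label carried by ServiceMonitor objects
  resources:
    requests:
      memory: 400Mi
  storage:
    volumeClaimTemplate:
      spec:
        storageClassName: rook-ceph-block   # assumed StorageClass
        resources:
          requests:
            storage: 10Gi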
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-prometheusrules.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-prometheusrules.yaml
new file mode 100644
index 00000000..9839687e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-prometheusrules.yaml
@@ -0,0 +1,360 @@
+{{- if and .Release.IsInstall .Values.prometheusOperator.enabled .Values.prometheusOperator.createCustomResource -}}
+# Source https://github.com/coreos/prometheus-operator/blob/master/contrib/kube-prometheus/manifests/0prometheus-operator-0prometheusruleCustomResourceDefinition.yaml
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: {{ printf "prometheusrules.%s" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+ annotations:
+ "helm.sh/hook": crd-install
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .metadata.creationTimestamp
+ description: |-
+ CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+
+ Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ name: Age
+ type: date
+ group: {{ .Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com" }}
+ names:
+ kind: PrometheusRule
+ listKind: PrometheusRuleList
+ plural: prometheusrules
+ singular: prometheusrule
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ description: ObjectMeta is metadata that all persisted resources must have,
+ which includes all objects users must create.
+ properties:
+ annotations:
+ description: 'Annotations is an unstructured key value map stored with
+ a resource that may be set by external tools to store and retrieve
+ arbitrary metadata. They are not queryable and should be preserved
+ when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
+ type: object
+ clusterName:
+ description: The name of the cluster which the object belongs to. This
+ is used to distinguish resources with same name and namespace in different
+ clusters. This field is not set anywhere right now and apiserver is
+ going to ignore it if set in create or update request.
+ type: string
+ creationTimestamp:
+ description: Time is a wrapper around time.Time which supports correct
+ marshaling to YAML and JSON. Wrappers are provided for many of the
+ factory methods that the time package offers.
+ format: date-time
+ type: string
+ deletionGracePeriodSeconds:
+ description: Number of seconds allowed for this object to gracefully
+ terminate before it will be removed from the system. Only set when
+ deletionTimestamp is also set. May only be shortened. Read-only.
+ format: int64
+ type: integer
+ deletionTimestamp:
+ description: Time is a wrapper around time.Time which supports correct
+ marshaling to YAML and JSON. Wrappers are provided for many of the
+ factory methods that the time package offers.
+ format: date-time
+ type: string
+ finalizers:
+ description: Must be empty before the object is deleted from the registry.
+ Each entry is an identifier for the responsible component that will
+ remove the entry from the list. If the deletionTimestamp of the object
+ is non-nil, entries in this list can only be removed.
+ items:
+ type: string
+ type: array
+ generateName:
+ description: |-
+ GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.
+
+ If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).
+
+ Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
+ type: string
+ generation:
+ description: A sequence number representing a specific generation of
+ the desired state. Populated by the system. Read-only.
+ format: int64
+ type: integer
+ initializers:
+ description: Initializers tracks the progress of initialization.
+ properties:
+ pending:
+ description: Pending is a list of initializers that must execute
+ in order before this object is visible. When the last pending
+ initializer is removed, and no failing result is set, the initializers
+ struct will be set to nil and the object is considered as initialized
+ and visible to all clients.
+ items:
+ description: Initializer is information about an initializer that
+ has not yet completed.
+ properties:
+ name:
+ description: name of the process that is responsible for initializing
+ this object.
+ type: string
+ required:
+ - name
+ type: array
+ result:
+ description: Status is a return value for calls that don't return
+ other objects.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this
+ representation of an object. Servers should convert recognized
+ schemas to the latest internal value, and may reject unrecognized
+ values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ code:
+ description: Suggested HTTP return code for this status, 0 if
+ not set.
+ format: int32
+ type: integer
+ details:
+ description: StatusDetails is a set of additional properties
+ that MAY be set by the server to provide additional information
+ about a response. The Reason field of a Status object defines
+ what attributes will be set. Clients must ignore fields that
+ do not match the defined type of each attribute, and should
+ assume that any attribute may be empty, invalid, or under
+ defined.
+ properties:
+ causes:
+ description: The Causes array includes more details associated
+ with the StatusReason failure. Not all StatusReasons may
+ provide detailed causes.
+ items:
+ description: StatusCause provides more information about
+ an api.Status failure, including cases when multiple
+ errors are encountered.
+ properties:
+ field:
+ description: |-
+ The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.
+
+ Examples:
+ "name" - the field "name" on the current resource
+ "items[0].name" - the field "name" on the first array entry in "items"
+ type: string
+ message:
+ description: A human-readable description of the cause
+ of the error. This field may be presented as-is
+ to a reader.
+ type: string
+ reason:
+ description: A machine-readable description of the
+ cause of the error. If this value is empty there
+ is no information available.
+ type: string
+ type: array
+ group:
+ description: The group attribute of the resource associated
+ with the status StatusReason.
+ type: string
+ kind:
+ description: 'The kind attribute of the resource associated
+ with the status StatusReason. On some operations may differ
+ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: The name attribute of the resource associated
+ with the status StatusReason (when there is a single name
+ which can be described).
+ type: string
+ retryAfterSeconds:
+ description: If specified, the time in seconds before the
+ operation should be retried. Some errors may indicate
+ the client must take an alternate action - for those errors
+ this field may indicate how long to wait before taking
+ the alternate action.
+ format: int32
+ type: integer
+ uid:
+ description: 'UID of the resource. (when there is a single
+ resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource
+ this object represents. Servers may infer this from the endpoint
+ the client submits requests to. Cannot be updated. In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ message:
+ description: A human-readable description of the status of this
+ operation.
+ type: string
+ metadata:
+ description: ListMeta describes metadata that synthetic resources
+ must have, including lists and various status objects. A resource
+ may have only one of {ObjectMeta, ListMeta}.
+ properties:
+ continue:
+ description: continue may be set if the user set a limit
+ on the number of items returned, and indicates that the
+ server has more data available. The value is opaque and
+ may be used to issue another request to the endpoint that
+ served this list to retrieve the next set of available
+ objects. Continuing a list may not be possible if the
+ server configuration has changed or more than a few minutes
+ have passed. The resourceVersion field returned when using
+ this continue value will be identical to the value in
+ the first response.
+ type: string
+ resourceVersion:
+ description: 'String that identifies the server''s internal
+ version of this object that can be used by clients to
+ determine when objects have changed. Value must be treated
+ as opaque by clients and passed unmodified back to the
+ server. Populated by the system. Read-only. More info:
+ https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ selfLink:
+ description: selfLink is a URL representing this object.
+ Populated by the system. Read-only.
+ type: string
+ reason:
+ description: A machine-readable description of why this operation
+ is in the "Failure" status. If this value is empty there is
+ no information available. A Reason clarifies an HTTP status
+ code but does not override it.
+ type: string
+ status:
+ description: 'Status of the operation. One of: "Success" or
+ "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status'
+ type: string
+ required:
+ - pending
+ labels:
+ description: 'Map of string keys and values that can be used to organize
+ and categorize (scope and select) objects. May match selectors of
+ replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels'
+ type: object
+ name:
+ description: 'Name must be unique within a namespace. Is required when
+ creating resources, although some resources may allow a client to
+ request the generation of an appropriate name automatically. Name
+ is primarily intended for creation idempotence and configuration definition.
+ Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ namespace:
+ description: |-
+ Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.
+
+ Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces
+ type: string
+ ownerReferences:
+ description: List of objects depended by this object. If ALL objects
+ in the list have been deleted, this object will be garbage collected.
+ If this object is managed by a controller, then an entry in this list
+ will point to this controller, with the controller field set to true.
+ There cannot be more than one managing controller.
+ items:
+ description: OwnerReference contains enough information to let you
+ identify an owning object. Currently, an owning object must be in
+ the same namespace, so there is no namespace field.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ blockOwnerDeletion:
+ description: If true, AND if the owner has the "foregroundDeletion"
+ finalizer, then the owner cannot be deleted from the key-value
+ store until this reference is removed. Defaults to false. To
+ set this field, a user needs "delete" permission of the owner,
+ otherwise 422 (Unprocessable Entity) will be returned.
+ type: boolean
+ controller:
+ description: If true, this reference points to the managing controller.
+ type: boolean
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
+ type: string
+ required:
+ - apiVersion
+ - kind
+ - name
+ - uid
+ type: array
+ resourceVersion:
+ description: |-
+ An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.
+
+ Populated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ selfLink:
+ description: SelfLink is a URL representing this object. Populated by
+ the system. Read-only.
+ type: string
+ uid:
+ description: |-
+ UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.
+
+ Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+ type: string
+ spec:
+ description: PrometheusRuleSpec contains specification parameters for a
+ Rule.
+ properties:
+ groups:
+ description: Content of Prometheus rule file
+ items:
+ description: RuleGroup is a list of sequentially evaluated recording
+ and alerting rules.
+ properties:
+ interval:
+ type: string
+ name:
+ type: string
+ rules:
+ items:
+ description: Rule describes an alerting or recording rule.
+ properties:
+ alert:
+ type: string
+ annotations:
+ type: object
+ expr:
+ anyOf:
+ - type: string
+ - type: integer
+ for:
+ type: string
+ labels:
+ type: object
+ record:
+ type: string
+ required:
+ - expr
+ type: array
+ required:
+ - name
+ - rules
+ type: array
+ version: v1
+{{- end }} \ No newline at end of file
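As a quick orientation for the PrometheusRule schema defined above, the following is a minimal, hypothetical PrometheusRule object with one alerting rule. It is illustrative only and not shipped by this chart; the namespace, label, alert name, and expression are all assumptions.

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-rules              # hypothetical name
  namespace: monitoring            # assumed namespace
  labels:
    release: example               # assumed label matched by the Prometheus ruleSelector
spec:
  groups:
  - name: example.rules
    rules:
    - alert: HighErrorRate         # hypothetical alert
      expr: rate(http_requests_total{code=~"5.."}[5m]) > 1
      for: 10m
      labels:
        severity: warning
      annotations:
        summary: High rate of HTTP 5xx responses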
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-servicemonitor.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-servicemonitor.yaml
new file mode 100644
index 00000000..ac0a633b
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/crd-servicemonitor.yaml
@@ -0,0 +1,310 @@
+{{- if and .Release.IsInstall .Values.prometheusOperator.enabled .Values.prometheusOperator.createCustomResource -}}
+# Source: https://github.com/coreos/prometheus-operator/blob/master/contrib/kube-prometheus/manifests/0prometheus-operator-0servicemonitorCustomResourceDefinition.yaml
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: {{ printf "servicemonitors.%s" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+ annotations:
+ "helm.sh/hook": crd-install
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .metadata.creationTimestamp
+ description: |-
+ CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+
+ Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ name: Age
+ type: date
+ group: {{ .Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com" }}
+ names:
+ kind: ServiceMonitor
+ listKind: ServiceMonitorList
+ plural: servicemonitors
+ singular: servicemonitor
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: ServiceMonitorSpec contains specification parameters for a
+ ServiceMonitor.
+ properties:
+ endpoints:
+ description: A list of endpoints allowed as part of this ServiceMonitor.
+ items:
+ description: Endpoint defines a scrapeable endpoint serving Prometheus
+ metrics.
+ properties:
+ basicAuth:
+ description: 'BasicAuth allows an endpoint to authenticate over
+ basic authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoints'
+ properties:
+ password:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ username:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ bearerTokenFile:
+ description: File to read bearer token for scraping targets.
+ type: string
+ honorLabels:
+ description: HonorLabels chooses the metric's labels on collisions
+ with target labels.
+ type: boolean
+ interval:
+ description: Interval at which metrics should be scraped
+ type: string
+ metricRelabelings:
+ description: MetricRelabelConfigs to apply to samples before ingestion.
+ items:
+ description: 'RelabelConfig allows dynamic rewriting of the
+ label set, being applied to samples before ingestion. It defines
+ `<metric_relabel_configs>`-section of Prometheus configuration.
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
+ properties:
+ action:
+ description: Action to perform based on regex matching.
+ Default is 'replace'
+ type: string
+ modulus:
+ description: Modulus to take of the hash of the source label
+ values.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched. Default is '(.*)'
+ type: string
+ replacement:
+ description: Replacement value against which a regex replace
+ is performed if the regular expression matches. Regex
+ capture groups are available. Default is '$1'
+ type: string
+ separator:
+ description: Separator placed between concatenated source
+ label values. default is ';'.
+ type: string
+ sourceLabels:
+ description: The source labels select values from existing
+ labels. Their content is concatenated using the configured
+ separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
+ items:
+ type: string
+ type: array
+ targetLabel:
+ description: Label to which the resulting value is written
+ in a replace action. It is mandatory for replace actions.
+ Regex capture groups are available.
+ type: string
+ type: array
+ params:
+ description: Optional HTTP URL parameters
+ type: object
+ path:
+ description: HTTP path to scrape for metrics.
+ type: string
+ port:
+ description: Name of the service port this endpoint refers to.
+ Mutually exclusive with targetPort.
+ type: string
+ proxyUrl:
+ description: ProxyURL, e.g. http://proxyserver:2195. Directs scrapes
+ to proxy through this endpoint.
+ type: string
+ relabelings:
+ description: 'RelabelConfigs to apply to samples before ingestion.
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<relabel_config>'
+ items:
+ description: 'RelabelConfig allows dynamic rewriting of the
+ label set, being applied to samples before ingestion. It defines
+ `<metric_relabel_configs>`-section of Prometheus configuration.
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
+ properties:
+ action:
+ description: Action to perform based on regex matching.
+ Default is 'replace'
+ type: string
+ modulus:
+ description: Modulus to take of the hash of the source label
+ values.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched. default is '(.*)'
+ type: string
+ replacement:
+ description: Replacement value against which a regex replace
+ is performed if the regular expression matches. Regex
+ capture groups are available. Default is '$1'
+ type: string
+ separator:
+ description: Separator placed between concatenated source
+ label values. default is ';'.
+ type: string
+ sourceLabels:
+ description: The source labels select values from existing
+ labels. Their content is concatenated using the configured
+ separator and matched against the configured regular expression
+ for the replace, keep, and drop actions.
+ items:
+ type: string
+ type: array
+ targetLabel:
+ description: Label to which the resulting value is written
+ in a replace action. It is mandatory for replace actions.
+ Regex capture groups are available.
+ type: string
+ type: array
+ scheme:
+ description: HTTP scheme to use for scraping.
+ type: string
+ scrapeTimeout:
+ description: Timeout after which the scrape is ended
+ type: string
+ targetPort:
+ anyOf:
+ - type: string
+ - type: integer
+ tlsConfig:
+ description: TLSConfig specifies TLS configuration parameters.
+ properties:
+ caFile:
+ description: The CA cert to use for the targets.
+ type: string
+ certFile:
+ description: The client cert file for the targets.
+ type: string
+ insecureSkipVerify:
+ description: Disable target certificate validation.
+ type: boolean
+ keyFile:
+ description: The client key file for the targets.
+ type: string
+ serverName:
+ description: Used to verify the hostname for the targets.
+ type: string
+ type: array
+ jobLabel:
+ description: The label to use to retrieve the job name from.
+ type: string
+ namespaceSelector:
+ description: NamespaceSelector is a selector for selecting either all
+ namespaces or a list of namespaces.
+ properties:
+ any:
+ description: Boolean describing whether all namespaces are selected
+ in contrast to a list restricting them.
+ type: boolean
+ matchNames:
+ description: List of namespace names.
+ items:
+ type: string
+ type: array
+ podTargetLabels:
+ description: PodTargetLabels transfers labels on the Kubernetes Pod
+ onto the target.
+ items:
+ type: string
+ type: array
+ sampleLimit:
+ description: SampleLimit defines per-scrape limit on number of scraped
+ samples that will be accepted.
+ format: int64
+ type: integer
+ selector:
+ description: A label selector is a label query over a set of resources.
+ The result of matchLabels and matchExpressions are ANDed. An empty
+ label selector matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains
+ values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a
+ set of values. Valid operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator
+ is In or NotIn, the values array must be non-empty. If the
+ operator is Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator is
+ "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ targetLabels:
+ description: TargetLabels transfers labels on the Kubernetes Service
+ onto the target.
+ items:
+ type: string
+ type: array
+ required:
+ - endpoints
+ - selector
+ version: v1
+{{- end }}
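For orientation, a minimal ServiceMonitor that satisfies the schema above might look as follows; every name, label, and port here is illustrative, and the apiVersion assumes the default crdApiGroup of monitoring.coreos.com:

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app                 # illustrative name
  labels:
    release: my-release             # only needed if the Prometheus serviceMonitorSelector keys on the release label
spec:
  endpoints:                        # required by the CRD above
    - port: http                    # name of the Service port to scrape
      interval: 30s
      path: /metrics
  selector:                         # required by the CRD above
    matchLabels:
      app: example-app
  namespaceSelector:
    matchNames:
      - default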
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/deployment.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/deployment.yaml
new file mode 100644
index 00000000..809d6fa8
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/deployment.yaml
@@ -0,0 +1,71 @@
+{{- if .Values.prometheusOperator.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-operator
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+ release: {{ .Release.Name | quote }}
+ template:
+ metadata:
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 8 }}
+{{- if .Values.prometheusOperator.podLabels }}
+{{ toYaml .Values.prometheusOperator.podLabels | indent 8 }}
+{{- end }}
+ spec:
+ {{- if .Values.prometheusOperator.priorityClassName }}
+ priorityClassName: {{ .Values.prometheusOperator.priorityClassName }}
+ {{- end }}
+ containers:
+ - name: {{ template "prometheus-operator.name" . }}
+ image: "{{ .Values.prometheusOperator.image.repository }}:{{ .Values.prometheusOperator.image.tag }}"
+ imagePullPolicy: "{{ .Values.prometheusOperator.image.pullPolicy }}"
+ args:
+ {{- if .Values.prometheusOperator.kubeletService.enabled }}
+ - --kubelet-service={{ .Values.prometheusOperator.kubeletService.namespace }}/{{ template "prometheus-operator.fullname" . }}-kubelet
+ {{- end }}
+ {{- if .Values.prometheusOperator.logFormat }}
+ - --log-format={{ .Values.prometheusOperator.logFormat }}
+ {{- end }}
+ {{- if .Values.prometheusOperator.logLevel }}
+ - --log-level={{ .Values.prometheusOperator.logLevel }}
+ {{- end }}
+ - --logtostderr=true
+ - --crd-apigroup={{ .Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com" }}
+ - --localhost=127.0.0.1
+ - --prometheus-config-reloader={{ .Values.prometheusOperator.prometheusConfigReloaderImage.repository }}:{{ .Values.prometheusOperator.prometheusConfigReloaderImage.tag }}
+ - --config-reloader-image={{ .Values.prometheusOperator.configmapReloadImage.repository }}:{{ .Values.prometheusOperator.configmapReloadImage.tag }}
+ ports:
+ - containerPort: 8080
+ name: http
+ resources:
+{{ toYaml .Values.prometheusOperator.resources | indent 12 }}
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+{{- if .Values.prometheusOperator.securityContext }}
+ securityContext:
+{{ toYaml .Values.prometheusOperator.securityContext | indent 8 }}
+{{- end }}
+ serviceAccountName: {{ template "prometheus-operator.operator.serviceAccountName" . }}
+ {{- with .Values.prometheusOperator.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.prometheusOperator.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.prometheusOperator.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+{{- end }}
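As a rough sketch of the values this deployment template reads (the keys come from the references above; the image coordinates and resource figures are illustrative rather than the chart's defaults):

prometheusOperator:
  enabled: true
  image:
    repository: quay.io/coreos/prometheus-operator   # illustrative
    tag: v0.29.0                                     # illustrative
    pullPolicy: IfNotPresent
  logLevel: info
  kubeletService:
    enabled: true
    namespace: kube-system
  resources:
    limits:
      cpu: 200m
      memory: 100Mi
  nodeSelector: {}
  tolerations: []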
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp-clusterrole.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp-clusterrole.yaml
new file mode 100644
index 00000000..748ec293
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp-clusterrole.yaml
@@ -0,0 +1,15 @@
+{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-operator-psp
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+rules:
+- apiGroups: ['extensions']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames:
+ - {{ template "prometheus-operator.fullname" . }}-operator
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp-clusterrolebinding.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp-clusterrolebinding.yaml
new file mode 100644
index 00000000..77a87a39
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp-clusterrolebinding.yaml
@@ -0,0 +1,17 @@
+{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-operator-psp
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "prometheus-operator.fullname" . }}-operator-psp
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "prometheus-operator.operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp.yaml
new file mode 100644
index 00000000..38491a21
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/psp.yaml
@@ -0,0 +1,47 @@
+{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-operator
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ privileged: false
+ # Required to prevent escalations to root.
+ # allowPrivilegeEscalation: false
+ # This is redundant with non-root + disallow privilege escalation,
+ # but we can provide it for defense in depth.
+ #requiredDropCapabilities:
+ # - ALL
+ # Allow core volume types.
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ - 'persistentVolumeClaim'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ # Permits the container to run with root privileges as well.
+ rule: 'RunAsAny'
+ seLinux:
+ # This policy assumes the nodes are using AppArmor rather than SELinux.
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 0
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 0
+ max: 65535
+ readOnlyRootFilesystem: false
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/service.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/service.yaml
new file mode 100644
index 00000000..60913e10
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/service.yaml
@@ -0,0 +1,41 @@
+{{- if .Values.prometheusOperator.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-operator
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.prometheusOperator.service.annotations }}
+ annotations:
+{{ toYaml .Values.prometheusOperator.service.annotations | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.prometheusOperator.service.clusterIP }}
+ clusterIP: {{ .Values.prometheusOperator.service.clusterIP }}
+{{- end }}
+{{- if .Values.prometheusOperator.service.externalIPs }}
+ externalIPs:
+{{ toYaml .Values.prometheusOperator.service.externalIPs | indent 4 }}
+{{- end }}
+{{- if .Values.prometheusOperator.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.prometheusOperator.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.prometheusOperator.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges:
+ {{- range $cidr := .Values.prometheusOperator.service.loadBalancerSourceRanges }}
+ - {{ $cidr }}
+ {{- end }}
+{{- end }}
+ ports:
+ - name: http
+ {{- if eq .Values.prometheusOperator.service.type "NodePort" }}
+ nodePort: {{ .Values.prometheusOperator.service.nodePort }}
+ {{- end }}
+ port: 8080
+ targetPort: http
+ selector:
+ app: {{ template "prometheus-operator.name" . }}-operator
+ release: {{ .Release.Name | quote }}
+ type: "{{ .Values.prometheusOperator.service.type }}"
+{{- end }}
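The service settings consumed by this template could be overridden roughly as below; the NodePort number is only an example:

prometheusOperator:
  service:
    type: NodePort            # exercises the nodePort branch above
    nodePort: 30080           # illustrative
    annotations: {}
    clusterIP: ""
    externalIPs: []
    loadBalancerIP: ""
    loadBalancerSourceRanges: []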
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/serviceaccount.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/serviceaccount.yaml
new file mode 100644
index 00000000..2cffa7de
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.prometheusOperator.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "prometheus-operator.operator.serviceAccountName" . }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+imagePullSecrets:
+{{ toYaml .Values.global.imagePullSecrets | indent 2 }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/servicemonitor.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/servicemonitor.yaml
new file mode 100644
index 00000000..9532c1f9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus-operator/servicemonitor.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.serviceMonitor.selfMonitor }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: ServiceMonitor
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-operator
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ endpoints:
+ - port: http
+ honorLabels: true
+ selector:
+ matchLabels:
+ app: {{ template "prometheus-operator.name" . }}-operator
+ release: {{ .Release.Name | quote }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace | quote }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalAlertRelabelConfigs.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalAlertRelabelConfigs.yaml
new file mode 100644
index 00000000..1c54f40b
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalAlertRelabelConfigs.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalAlertRelabelConfigs }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus-am-relabel-confg
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus-am-relabel-confg
+{{ include "prometheus-operator.labels" . | indent 4 }}
+data:
+ additional-alert-relabel-configs.yaml: {{ toYaml .Values.prometheus.prometheusSpec.additionalAlertRelabelConfigs | b64enc | quote }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalAlertmanagerConfigs.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalAlertmanagerConfigs.yaml
new file mode 100644
index 00000000..4475e7bd
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalAlertmanagerConfigs.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus-am-confg
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus-am-confg
+{{ include "prometheus-operator.labels" . | indent 4 }}
+data:
+ additional-alertmanager-configs.yaml: {{ toYaml .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs | b64enc | quote }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalPrometheusRules.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalPrometheusRules.yaml
new file mode 100644
index 00000000..0d85c9bd
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalPrometheusRules.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.additionalPrometheusRules }}
+apiVersion: v1
+kind: List
+items:
+{{- range .Values.additionalPrometheusRules }}
+ - apiVersion: {{ printf "%s/v1" ($.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+ kind: PrometheusRule
+ metadata:
+ name: {{ template "prometheus-operator.name" $ }}-{{ .name }}
+ labels:
+ app: {{ template "prometheus-operator.name" $ }}
+{{ include "prometheus-operator.labels" $ | indent 8 }}
+ {{- if .additionalLabels }}
+{{ toYaml .additionalLabels | indent 8 }}
+ {{- end }}
+ spec:
+ groups:
+{{ toYaml .groups | indent 8 }}
+{{- end }}
+{{- end }}
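Each entry of .Values.additionalPrometheusRules becomes its own PrometheusRule; a hypothetical entry, shaped to match the group schema defined in the PrometheusRule CRD earlier in this patch, would look like:

additionalPrometheusRules:
  - name: my-extra-rules                 # appended to the chart name
    additionalLabels:
      role: custom-rules                 # illustrative
    groups:
      - name: example.rules
        rules:
          - alert: ExampleAlwaysFiring   # illustrative alert
            expr: vector(1)
            labels:
              severity: none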
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalScrapeConfigs.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalScrapeConfigs.yaml
new file mode 100644
index 00000000..9d6bb616
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/additionalScrapeConfigs.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalScrapeConfigs }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus-scrape-confg
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus-scrape-confg
+{{ include "prometheus-operator.labels" . | indent 4 }}
+data:
+ additional-scrape-configs.yaml: {{ toYaml .Values.prometheus.prometheusSpec.additionalScrapeConfigs | b64enc | quote }}
+{{- end }} \ No newline at end of file
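This Secret is only rendered when prometheus.prometheusSpec.additionalScrapeConfigs is set; the value is expected to be a list of raw Prometheus scrape_config entries, for example (job name and target are illustrative):

prometheus:
  prometheusSpec:
    additionalScrapeConfigs:
      - job_name: example-static         # illustrative
        static_configs:
          - targets:
              - 127.0.0.1:9999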
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/clusterrole.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/clusterrole.yaml
new file mode 100644
index 00000000..799027d9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/clusterrole.yaml
@@ -0,0 +1,35 @@
+{{- if and .Values.prometheus.enabled .Values.global.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes/metrics
+ verbs:
+ - get
+ - list
+ - watch
+# These permissions are not in the prometheus-operator repo
+# they're grabbed from https://github.com/prometheus/prometheus/blob/master/documentation/examples/rbac-setup.yml
+- apiGroups: [""]
+ resources:
+ - nodes
+ - nodes/proxy
+ - services
+ - endpoints
+ - pods
+ verbs: ["get", "list", "watch"]
+- apiGroups:
+ - extensions
+ resources:
+ - ingresses
+ verbs: ["get", "list", "watch"]
+- nonResourceURLs: ["/metrics"]
+ verbs: ["get"]
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/clusterrolebinding.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/clusterrolebinding.yaml
new file mode 100644
index 00000000..b0c0e9e1
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/clusterrolebinding.yaml
@@ -0,0 +1,18 @@
+{{- if and .Values.prometheus.enabled .Values.global.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "prometheus-operator.prometheus.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end }}
+
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/ingress.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/ingress.yaml
new file mode 100644
index 00000000..e013e960
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/ingress.yaml
@@ -0,0 +1,33 @@
+{{- if and .Values.prometheus.enabled .Values.prometheus.ingress.enabled }}
+{{- $routePrefix := .Values.prometheus.prometheusSpec.routePrefix }}
+{{- $serviceName := printf "%s-%s" (include "prometheus-operator.fullname" .) "prometheus" }}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+{{- if .Values.prometheus.ingress.annotations }}
+ annotations:
+{{ toYaml .Values.prometheus.ingress.annotations | indent 4 }}
+{{- end }}
+ name: {{ $serviceName }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.prometheus.ingress.labels }}
+{{ toYaml .Values.prometheus.ingress.labels | indent 4 }}
+{{- end }}
+spec:
+ rules:
+ {{- range $host := .Values.prometheus.ingress.hosts }}
+ - host: {{ . }}
+ http:
+ paths:
+ - path: "{{ $routePrefix }}"
+ backend:
+ serviceName: {{ $serviceName }}
+ servicePort: 9090
+ {{- end }}
+{{- if .Values.prometheus.ingress.tls }}
+ tls:
+{{ toYaml .Values.prometheus.ingress.tls | indent 4 }}
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/podDisruptionBudget.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/podDisruptionBudget.yaml
new file mode 100644
index 00000000..a51cda5d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/podDisruptionBudget.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.prometheus.enabled .Values.prometheus.podDisruptionBudget.enabled }}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ {{- if .Values.prometheus.podDisruptionBudget.minAvailable }}
+ minAvailable: {{ .Values.prometheus.podDisruptionBudget.minAvailable }}
+ {{- end }}
+ {{- if .Values.prometheus.podDisruptionBudget.maxUnavailable }}
+ maxUnavailable: {{ .Values.prometheus.podDisruptionBudget.maxUnavailable }}
+ {{- end }}
+ selector:
+ matchLabels:
+ app: prometheus
+ prometheus: {{ template "prometheus-operator.fullname" . }}-prometheus
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/prometheus.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/prometheus.yaml
new file mode 100644
index 00000000..c172c0e3
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/prometheus.yaml
@@ -0,0 +1,176 @@
+{{- if and .Values.prometheus.enabled .Values.prometheus.startup -}}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: Prometheus
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ alerting:
+ alertmanagers:
+{{- if .Values.prometheus.prometheusSpec.alertingEndpoints }}
+{{ toYaml .Values.prometheus.prometheusSpec.alertingEndpoints | indent 6 }}
+{{- else }}
+ - namespace: {{ .Release.Namespace }}
+ name: {{ template "prometheus-operator.fullname" . }}-alertmanager
+ port: web
+ {{- if .Values.alertmanager.alertmanagerSpec.routePrefix }}
+ pathPrefix: "{{ .Values.alertmanager.alertmanagerSpec.routePrefix }}"
+ {{- end }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.image }}
+ baseImage: {{ .Values.prometheus.prometheusSpec.image.repository }}
+ version: {{ .Values.prometheus.prometheusSpec.image.tag }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.externalLabels }}
+ externalLabels:
+{{ toYaml .Values.prometheus.prometheusSpec.externalLabels | indent 4 }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.externalUrl }}
+ externalUrl: "{{ .Values.prometheus.prometheusSpec.externalUrl }}"
+{{- else if .Values.prometheus.ingress.enabled }}
+ externalUrl: "http://{{ index .Values.prometheus.ingress.hosts 0 }}{{ .Values.prometheus.prometheusSpec.routePrefix }}"
+{{- else }}
+ externalUrl: http://{{ template "prometheus-operator.fullname" . }}-prometheus.{{ .Release.Namespace }}:9090
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.prometheus.prometheusSpec.nodeSelector | indent 4 }}
+{{- end }}
+ paused: {{ .Values.prometheus.prometheusSpec.paused }}
+ replicas: {{ .Values.prometheus.prometheusSpec.replicas }}
+ logLevel: {{ .Values.prometheus.prometheusSpec.logLevel }}
+ listenLocal: {{ .Values.prometheus.prometheusSpec.listenLocal }}
+{{- if .Values.prometheus.prometheusSpec.scrapeInterval }}
+ scrapeInterval: {{ .Values.prometheus.prometheusSpec.scrapeInterval }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.evaluationInterval }}
+ evaluationInterval: {{ .Values.prometheus.prometheusSpec.evaluationInterval }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.resources }}
+ resources:
+{{ toYaml .Values.prometheus.prometheusSpec.resources | indent 4 }}
+{{- end }}
+ retention: {{ .Values.prometheus.prometheusSpec.retention | quote }}
+{{- if .Values.prometheus.prometheusSpec.routePrefix }}
+ routePrefix: {{ .Values.prometheus.prometheusSpec.routePrefix | quote }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.secrets }}
+ secrets:
+{{ toYaml .Values.prometheus.prometheusSpec.secrets | indent 4 }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.configMaps }}
+ configMaps:
+{{ toYaml .Values.prometheus.prometheusSpec.configMaps | indent 4 }}
+{{- end }}
+ serviceAccountName: {{ template "prometheus-operator.prometheus.serviceAccountName" . }}
+{{- if .Values.prometheus.prometheusSpec.serviceMonitorSelector }}
+ serviceMonitorSelector:
+{{ toYaml .Values.prometheus.prometheusSpec.serviceMonitorSelector | indent 4 }}
+{{ else if .Values.prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues }}
+ serviceMonitorSelector:
+ matchLabels:
+ release: {{ .Release.Name | quote }}
+{{ else }}
+ serviceMonitorSelector: {}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.serviceMonitorNamespaceSelector }}
+ serviceMonitorNamespaceSelector:
+{{ toYaml .Values.prometheus.prometheusSpec.serviceMonitorNamespaceSelector | indent 4 }}
+{{ else }}
+ serviceMonitorNamespaceSelector: {}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.remoteRead }}
+ remoteRead:
+{{ toYaml .Values.prometheus.prometheusSpec.remoteRead | indent 4 }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.remoteWrite }}
+ remoteWrite:
+{{ toYaml .Values.prometheus.prometheusSpec.remoteWrite | indent 4 }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.securityContext }}
+ securityContext:
+{{ toYaml .Values.prometheus.prometheusSpec.securityContext | indent 4 }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.ruleNamespaceSelector }}
+ ruleNamespaceSelector:
+{{ toYaml .Values.prometheus.prometheusSpec.ruleNamespaceSelector | indent 4 }}
+{{ else }}
+ ruleNamespaceSelector: {}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.ruleSelector }}
+ ruleSelector:
+{{ toYaml .Values.prometheus.prometheusSpec.ruleSelector | indent 4 }}
+{{- else if .Values.prometheus.prometheusSpec.ruleSelectorNilUsesHelmValues }}
+ ruleSelector:
+ matchLabels:
+ app: {{ template "prometheus-operator.name" . }}
+ release: {{ .Release.Name | quote }}
+{{ else }}
+ ruleSelector: {}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.storageSpec }}
+ storage:
+{{ toYaml .Values.prometheus.prometheusSpec.storageSpec | indent 4 }}
+{{- end }}
+ {{- if .Values.prometheus.prometheusSpec.podMetadata }}
+ podMetadata:
+{{ toYaml .Values.prometheus.prometheusSpec.podMetadata | indent 4 }}
+ {{- end }}
+{{- if eq .Values.prometheus.prometheusSpec.podAntiAffinity "hard" }}
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: {{ .Values.prometheus.prometheusSpec.podAntiAffinityTopologyKey }}
+ labelSelector:
+ matchLabels:
+ app: prometheus
+ prometheus: {{ template "prometheus-operator.fullname" . }}-prometheus
+{{- else if eq .Values.prometheus.prometheusSpec.podAntiAffinity "soft" }}
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ topologyKey: {{ .Values.prometheus.prometheusSpec.podAntiAffinityTopologyKey }}
+ labelSelector:
+ matchLabels:
+ app: prometheus
+ prometheus: {{ template "prometheus-operator.fullname" . }}-prometheus
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.tolerations }}
+ tolerations:
+{{ toYaml .Values.prometheus.prometheusSpec.tolerations | indent 4 }}
+{{- end }}
+{{- if .Values.global.imagePullSecrets }}
+ imagePullSecrets:
+{{ toYaml .Values.global.imagePullSecrets | indent 4 }}
+{{- end }}
+{{- if or .Values.prometheus.prometheusSpec.additionalScrapeConfigs .Values.prometheus.prometheusSpec.additionalScrapeConfigsExternal }}
+ additionalScrapeConfigs:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus-scrape-confg
+ key: additional-scrape-configs.yaml
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs }}
+ additionalAlertManagerConfigs:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus-am-confg
+ key: additional-alertmanager-configs.yaml
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.additionalAlertRelabelConfigs }}
+ additionalAlertRelabelConfigs:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus-am-relabel-confg
+ key: additional-alert-relabel-configs.yaml
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.containers }}
+ containers:
+{{ toYaml .Values.prometheus.prometheusSpec.containers | indent 4 }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.priorityClassName }}
+ priorityClassName: {{ .Values.prometheus.prometheusSpec.priorityClassName }}
+{{- end }}
+{{- if .Values.prometheus.prometheusSpec.thanos }}
+ thanos:
+{{ toYaml .Values.prometheus.prometheusSpec.thanos | indent 4 }}
+{{- end }}
+{{- end }}
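Note that this chart renders the Prometheus resource only when both prometheus.enabled and prometheus.startup are true, and that leaving the selectors nil while the *NilUsesHelmValues flags are set restricts discovery to objects labelled with the release name. A hedged values sketch that exercises those branches (retention, replica count, and the topology key are illustrative):

prometheus:
  enabled: true
  startup: true                                        # extra gate used by this template
  prometheusSpec:
    serviceMonitorSelectorNilUsesHelmValues: true      # select only ServiceMonitors labelled release: <release-name>
    ruleSelectorNilUsesHelmValues: true
    podAntiAffinity: soft
    podAntiAffinityTopologyKey: kubernetes.io/hostname # illustrative
    retention: 10d                                     # illustrative
    replicas: 1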
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp-clusterrole.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp-clusterrole.yaml
new file mode 100644
index 00000000..a2ab02db
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp-clusterrole.yaml
@@ -0,0 +1,15 @@
+{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus-psp
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+rules:
+- apiGroups: ['extensions']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames:
+ - {{ template "prometheus-operator.fullname" . }}-prometheus
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp-clusterrolebinding.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp-clusterrolebinding.yaml
new file mode 100644
index 00000000..08faa722
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp-clusterrolebinding.yaml
@@ -0,0 +1,18 @@
+{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus-psp
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus-psp
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "prometheus-operator.prometheus.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end }}
+
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp.yaml
new file mode 100644
index 00000000..40d33462
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/psp.yaml
@@ -0,0 +1,47 @@
+{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ privileged: false
+ # Required to prevent escalations to root.
+ # allowPrivilegeEscalation: false
+ # This is redundant with non-root + disallow privilege escalation,
+ # but we can provide it for defense in depth.
+ #requiredDropCapabilities:
+ # - ALL
+ # Allow core volume types.
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ - 'persistentVolumeClaim'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ # Permits the container to run with root privileges as well.
+ rule: 'RunAsAny'
+ seLinux:
+ # This policy assumes the nodes are using AppArmor rather than SELinux.
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 0
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 0
+ max: 65535
+ readOnlyRootFilesystem: false
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/role-config.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/role-config.yaml
new file mode 100644
index 00000000..eef28dad
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/role-config.yaml
@@ -0,0 +1,16 @@
+{{- if and .Values.prometheus.enabled .Values.global.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus-config
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+{{- end}} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/role-specificNamespace.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/role-specificNamespace.yaml
new file mode 100644
index 00000000..9fe3f20e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/role-specificNamespace.yaml
@@ -0,0 +1,27 @@
+{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.prometheus.rbac.roleNamespaces }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleList
+items:
+{{- range uniq (append .Values.prometheus.rbac.roleNamespaces .Release.Namespace) }}
+- apiVersion: rbac.authorization.k8s.io/v1
+ kind: Role
+ metadata:
+ name: {{ template "prometheus-operator.fullname" $ }}-prometheus
+ labels:
+ app: {{ template "prometheus-operator.name" $ }}-prometheus
+{{ include "prometheus-operator.labels" $ | indent 6 }}
+ namespace: {{ . | quote }}
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ - services
+ - endpoints
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rolebinding-config.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rolebinding-config.yaml
new file mode 100644
index 00000000..89fb9ce7
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rolebinding-config.yaml
@@ -0,0 +1,17 @@
+{{- if and .Values.prometheus.enabled .Values.global.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus-config
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus
+subjects:
+- kind: ServiceAccount
+ name: {{ template "prometheus-operator.prometheus.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rolebinding-specificNamespace.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rolebinding-specificNamespace.yaml
new file mode 100644
index 00000000..64161876
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rolebinding-specificNamespace.yaml
@@ -0,0 +1,23 @@
+{{- if and .Values.prometheus.enabled .Values.global.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBindingList
+items:
+{{- range uniq (append .Values.prometheus.rbac.roleNamespaces .Release.Namespace) }}
+- apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: {{ template "prometheus-operator.fullname" $ }}-prometheus
+ labels:
+ app: {{ template "prometheus-operator.name" $ }}-prometheus
+{{ include "prometheus-operator.labels" $ | indent 6 }}
+ namespace: {{ . | quote }}
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "prometheus-operator.fullname" $ }}-prometheus
+ subjects:
+ - kind: ServiceAccount
+ name: {{ template "prometheus-operator.prometheus.serviceAccountName" $ }}
+ namespace: {{ $.Release.Namespace }}
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/alertmanager.rules.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/alertmanager.rules.yaml
new file mode 100644
index 00000000..c1762fd1
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/alertmanager.rules.yaml
@@ -0,0 +1,50 @@
+# Generated from 'alertmanager.rules' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.defaultRules.rules.alertmanager }}
+{{- $operatorJob := printf "%s-%s" (include "prometheus-operator.fullname" .) "operator" }}
+{{- $alertmanagerJob := printf "%s-%s" (include "prometheus-operator.fullname" .) "alertmanager" }}
+{{- $namespace := .Release.Namespace }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "alertmanager.rules" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: alertmanager.rules
+ rules:
+ - alert: AlertmanagerConfigInconsistent
+ annotations:
+ message: The configuration of the instances of the Alertmanager cluster `{{`{{$labels.service}}`}}` is out of sync.
+ expr: count_values("config_hash", alertmanager_config_hash{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"}) BY (service) / ON(service) GROUP_LEFT() label_replace(prometheus_operator_spec_replicas{job="{{ $operatorJob }}",namespace="{{ $namespace }}",controller="alertmanager"}, "service", "$1", "name", "(.*)") != 1
+ for: 5m
+ labels:
+ severity: critical
+ - alert: AlertmanagerFailedReload
+ annotations:
+ message: Reloading Alertmanager's configuration has failed for {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod}}`}}.
+ expr: alertmanager_config_last_reload_successful{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"} == 0
+ for: 10m
+ labels:
+ severity: warning
+ - alert: AlertmanagerMembersInconsistent
+ annotations:
+ message: Alertmanager has not found all other members of the cluster.
+ expr: |-
+ alertmanager_cluster_members{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"}
+ != on (service) GROUP_LEFT()
+ count by (service) (alertmanager_cluster_members{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"})
+ for: 5m
+ labels:
+ severity: critical
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/etcd.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/etcd.yaml
new file mode 100644
index 00000000..a68eeff2
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/etcd.yaml
@@ -0,0 +1,136 @@
+# Generated from 'etcd' group from https://raw.githubusercontent.com/etcd-io/etcd/master/Documentation/op-guide/etcd3_alert.rules.yml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.kubeEtcd.enabled .Values.defaultRules.rules.etcd }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "etcd" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: etcd
+ rules:
+ - alert: etcdInsufficientMembers
+ annotations:
+ message: 'etcd cluster "{{`{{ $labels.job }}`}}": insufficient members ({{`{{ $value }}`}}).'
+ expr: sum(up{job=~".*etcd.*"} == bool 1) by (job) < ((count(up{job=~".*etcd.*"}) by (job) + 1) / 2)
+ for: 3m
+ labels:
+ severity: critical
+ - alert: etcdNoLeader
+ annotations:
+ message: 'etcd cluster "{{`{{ $labels.job }}`}}": member {{`{{ $labels.instance }}`}} has no leader.'
+ expr: etcd_server_has_leader{job=~".*etcd.*"} == 0
+ for: 1m
+ labels:
+ severity: critical
+ - alert: etcdHighNumberOfLeaderChanges
+ annotations:
+ message: 'etcd cluster "{{`{{ $labels.job }}`}}": instance {{`{{ $labels.instance }}`}} has seen {{`{{ $value }}`}} leader changes within the last hour.'
+ expr: rate(etcd_server_leader_changes_seen_total{job=~".*etcd.*"}[15m]) > 3
+ for: 15m
+ labels:
+ severity: warning
+ - alert: etcdHighNumberOfFailedGRPCRequests
+ annotations:
+ message: 'etcd cluster "{{`{{ $labels.job }}`}}": {{`{{ $value }}`}}% of requests for {{`{{ $labels.grpc_method }}`}} failed on etcd instance {{`{{ $labels.instance }}`}}.'
+ expr: |-
+ 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code!="OK"}[5m])) BY (job, instance, grpc_service, grpc_method)
+ /
+ sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) BY (job, instance, grpc_service, grpc_method)
+ > 1
+ for: 10m
+ labels:
+ severity: warning
+ - alert: etcdHighNumberOfFailedGRPCRequests
+ annotations:
+ message: 'etcd cluster "{{`{{ $labels.job }}`}}": {{`{{ $value }}`}}% of requests for {{`{{ $labels.grpc_method }}`}} failed on etcd instance {{`{{ $labels.instance }}`}}.'
+ expr: |-
+ 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code!="OK"}[5m])) BY (job, instance, grpc_service, grpc_method)
+ /
+ sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) BY (job, instance, grpc_service, grpc_method)
+ > 5
+ for: 5m
+ labels:
+ severity: critical
+ - alert: etcdGRPCRequestsSlow
+ annotations:
+ message: 'etcd cluster "{{`{{ $labels.job }}`}}": gRPC requests to {{`{{ $labels.grpc_method }}`}} are taking {{`{{ $value }}`}}s on etcd instance {{`{{ $labels.instance }}`}}.'
+ expr: |-
+ histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_type="unary"}[5m])) by (job, instance, grpc_service, grpc_method, le))
+ > 0.15
+ for: 10m
+ labels:
+ severity: critical
+ - alert: etcdMemberCommunicationSlow
+ annotations:
+ message: 'etcd cluster "{{`{{ $labels.job }}`}}": member communication with {{`{{ $labels.To }}`}} is taking {{`{{ $value }}`}}s on etcd instance {{`{{ $labels.instance }}`}}.'
+ expr: |-
+ histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m]))
+ > 0.15
+ for: 10m
+ labels:
+ severity: warning
+ - alert: etcdHighNumberOfFailedProposals
+ annotations:
+ message: 'etcd cluster "{{`{{ $labels.job }}`}}": {{`{{ $value }}`}} proposal failures within the last hour on etcd instance {{`{{ $labels.instance }}`}}.'
+ expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m]) > 5
+ for: 15m
+ labels:
+ severity: warning
+ - alert: etcdHighFsyncDurations
+ annotations:
+ message: 'etcd cluster "{{`{{ $labels.job }}`}}": 99th percentile fsync durations are {{`{{ $value }}`}}s on etcd instance {{`{{ $labels.instance }}`}}.'
+ expr: |-
+ histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~".*etcd.*"}[5m]))
+ > 0.5
+ for: 10m
+ labels:
+ severity: warning
+ - alert: etcdHighCommitDurations
+ annotations:
+ message: 'etcd cluster "{{`{{ $labels.job }}`}}": 99th percentile commit durations are {{`{{ $value }}`}}s on etcd instance {{`{{ $labels.instance }}`}}.'
+ expr: |-
+ histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket{job=~".*etcd.*"}[5m]))
+ > 0.25
+ for: 10m
+ labels:
+ severity: warning
+ - alert: etcdHighNumberOfFailedHTTPRequests
+ annotations:
+ message: '{{`{{ $value }}`}}% of requests for {{`{{ $labels.method }}`}} failed on etcd instance {{`{{ $labels.instance }}`}}'
+ expr: |-
+ sum(rate(etcd_http_failed_total{job=~".*etcd.*", code!="404"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=~".*etcd.*"}[5m]))
+ BY (method) > 0.01
+ for: 10m
+ labels:
+ severity: warning
+ - alert: etcdHighNumberOfFailedHTTPRequests
+ annotations:
+ message: '{{`{{ $value }}`}}% of requests for {{`{{ $labels.method }}`}} failed on etcd instance {{`{{ $labels.instance }}`}}.'
+ expr: |-
+ sum(rate(etcd_http_failed_total{job=~".*etcd.*", code!="404"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=~".*etcd.*"}[5m]))
+ BY (method) > 0.05
+ for: 10m
+ labels:
+ severity: critical
+ - alert: etcdHTTPRequestsSlow
+ annotations:
+ message: etcd instance {{`{{ $labels.instance }}`}} HTTP requests to {{`{{ $labels.method }}`}} are slow.
+ expr: |-
+ histogram_quantile(0.99, rate(etcd_http_successful_duration_seconds_bucket[5m]))
+ > 0.15
+ for: 10m
+ labels:
+ severity: warning
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/general.rules.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/general.rules.yaml
new file mode 100644
index 00000000..9f8349f9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/general.rules.yaml
@@ -0,0 +1,46 @@
+# Generated from 'general.rules' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.defaultRules.rules.general }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "general.rules" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: general.rules
+ rules:
+ - alert: TargetDown
+ annotations:
+ message: '{{`{{ $value }}`}}% of the {{`{{ $labels.job }}`}} targets are down.'
+ expr: 100 * (count(up == 0) BY (job) / count(up) BY (job)) > 10
+ for: 10m
+ labels:
+ severity: warning
+ - alert: Watchdog
+ annotations:
+ message: 'This is an alert meant to ensure that the entire alerting pipeline is functional.
+
+ This alert is always firing, therefore it should always be firing in Alertmanager
+
+ and always fire against a receiver. There are integrations with various notification
+
+ mechanisms that send a notification when this alert is not firing. For example the
+
+ "DeadMansSnitch" integration in PagerDuty.
+
+ '
+ expr: vector(1)
+ labels:
+ severity: none
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/k8s.rules.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/k8s.rules.yaml
new file mode 100644
index 00000000..678df008
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/k8s.rules.yaml
@@ -0,0 +1,60 @@
+# Generated from 'k8s.rules' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.defaultRules.rules.k8s }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "k8s.rules" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: k8s.rules
+ rules:
+ - expr: sum(rate(container_cpu_usage_seconds_total{job="kubelet", image!="", container_name!=""}[5m])) by (namespace)
+ record: namespace:container_cpu_usage_seconds_total:sum_rate
+ - expr: |-
+ sum by (namespace, pod_name, container_name) (
+ rate(container_cpu_usage_seconds_total{job="kubelet", image!="", container_name!=""}[5m])
+ )
+ record: namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate
+ - expr: sum(container_memory_usage_bytes{job="kubelet", image!="", container_name!=""}) by (namespace)
+ record: namespace:container_memory_usage_bytes:sum
+ - expr: |-
+ sum by (namespace, label_name) (
+ sum(rate(container_cpu_usage_seconds_total{job="kubelet", image!="", container_name!=""}[5m])) by (namespace, pod_name)
+ * on (namespace, pod_name) group_left(label_name)
+ label_replace(kube_pod_labels{job="kube-state-metrics"}, "pod_name", "$1", "pod", "(.*)")
+ )
+ record: namespace_name:container_cpu_usage_seconds_total:sum_rate
+ - expr: |-
+ sum by (namespace, label_name) (
+ sum(container_memory_usage_bytes{job="kubelet",image!="", container_name!=""}) by (pod_name, namespace)
+ * on (namespace, pod_name) group_left(label_name)
+ label_replace(kube_pod_labels{job="kube-state-metrics"}, "pod_name", "$1", "pod", "(.*)")
+ )
+ record: namespace_name:container_memory_usage_bytes:sum
+ - expr: |-
+ sum by (namespace, label_name) (
+ sum(kube_pod_container_resource_requests_memory_bytes{job="kube-state-metrics"}) by (namespace, pod)
+ * on (namespace, pod) group_left(label_name)
+ label_replace(kube_pod_labels{job="kube-state-metrics"}, "pod_name", "$1", "pod", "(.*)")
+ )
+ record: namespace_name:kube_pod_container_resource_requests_memory_bytes:sum
+ - expr: |-
+ sum by (namespace, label_name) (
+ sum(kube_pod_container_resource_requests_cpu_cores{job="kube-state-metrics"} and on(pod) kube_pod_status_scheduled{condition="true"}) by (namespace, pod)
+ * on (namespace, pod) group_left(label_name)
+ label_replace(kube_pod_labels{job="kube-state-metrics"}, "pod_name", "$1", "pod", "(.*)")
+ )
+ record: namespace_name:kube_pod_container_resource_requests_cpu_cores:sum
+{{- end }} \ No newline at end of file
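Every generated PrometheusRule here also merges optional user-supplied metadata, as the defaultRules.labels and defaultRules.annotations branches above show. As an illustrative sketch only (the label and annotation keys below are made up), a values fragment that stamps extra metadata onto all of the default rule objects could be:

defaultRules:
  labels:
    role: alert-rules              # merged into each object's metadata.labels
  annotations:
    example.org/owner: daaas-team  # rendered under metadata.annotations

This is typically used to line the generated objects up with a Prometheus ruleSelector that matches on labels instead of picking up every PrometheusRule in the namespace.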
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-apiserver.rules.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-apiserver.rules.yaml
new file mode 100644
index 00000000..cbb19cb7
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-apiserver.rules.yaml
@@ -0,0 +1,35 @@
+# Generated from 'kube-apiserver.rules' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserver }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "kube-apiserver.rules" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: kube-apiserver.rules
+ rules:
+ - expr: histogram_quantile(0.99, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.99'
+ record: cluster_quantile:apiserver_request_latencies:histogram_quantile
+ - expr: histogram_quantile(0.9, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.9'
+ record: cluster_quantile:apiserver_request_latencies:histogram_quantile
+ - expr: histogram_quantile(0.5, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.5'
+ record: cluster_quantile:apiserver_request_latencies:histogram_quantile
+{{- end }} \ No newline at end of file
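The apiVersion of each manifest is computed from .Values.prometheusOperator.crdApiGroup with monitoring.coreos.com as the fallback, so by default these objects render as apiVersion: monitoring.coreos.com/v1. Purely as an illustration (the group name below is hypothetical), an installation that registered the operator CRDs under a different API group would redirect all of these templates at once with:

prometheusOperator:
  crdApiGroup: monitoring.example.internal   # hypothetical; omit to keep monitoring.coreos.com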
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml
new file mode 100644
index 00000000..2df9a096
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml
@@ -0,0 +1,37 @@
+# Generated from 'kube-prometheus-node-alerting.rules' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.defaultRules.rules.kubePrometheusNodeAlerting }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "kube-prometheus-node-alerting.rules" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: kube-prometheus-node-alerting.rules
+ rules:
+ - alert: NodeDiskRunningFull
+ annotations:
+ message: Device {{`{{ $labels.device }}`}} of node-exporter {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod }}`}} will be full within the next 24 hours.
+ expr: '(node:node_filesystem_usage: > 0.85) and (predict_linear(node:node_filesystem_avail:[6h], 3600 * 24) < 0)'
+ for: 30m
+ labels:
+ severity: warning
+ - alert: NodeDiskRunningFull
+ annotations:
+ message: Device {{`{{ $labels.device }}`}} of node-exporter {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod }}`}} will be full within the next 2 hours.
+ expr: '(node:node_filesystem_usage: > 0.85) and (predict_linear(node:node_filesystem_avail:[30m], 3600 * 2) < 0)'
+ for: 10m
+ labels:
+ severity: critical
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml
new file mode 100644
index 00000000..0d2ff510
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml
@@ -0,0 +1,37 @@
+# Generated from 'kube-prometheus-node-recording.rules' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.defaultRules.rules.kubePrometheusNodeRecording }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "kube-prometheus-node-recording.rules" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: kube-prometheus-node-recording.rules
+ rules:
+ - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[3m])) BY (instance)
+ record: instance:node_cpu:rate:sum
+ - expr: sum((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"})) BY (instance)
+ record: instance:node_filesystem_usage:sum
+ - expr: sum(rate(node_network_receive_bytes_total[3m])) BY (instance)
+ record: instance:node_network_receive_bytes:rate:sum
+ - expr: sum(rate(node_network_transmit_bytes_total[3m])) BY (instance)
+ record: instance:node_network_transmit_bytes:rate:sum
+ - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[5m])) WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) BY (instance, cpu)) BY (instance)
+ record: instance:node_cpu:ratio
+ - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[5m]))
+ record: cluster:node_cpu:sum_rate5m
+ - expr: cluster:node_cpu:sum_rate5m / count(sum(node_cpu_seconds_total) BY (instance, cpu))
+ record: cluster:node_cpu:ratio
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-scheduler.rules.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-scheduler.rules.yaml
new file mode 100644
index 00000000..e51b0181
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kube-scheduler.rules.yaml
@@ -0,0 +1,59 @@
+# Generated from 'kube-scheduler.rules' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.kubeScheduler.enabled .Values.defaultRules.rules.kubeScheduler }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "kube-scheduler.rules" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: kube-scheduler.rules
+ rules:
+ - expr: histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_latency_microseconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.99'
+ record: cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile
+ - expr: histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_latency_microseconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.99'
+ record: cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile
+ - expr: histogram_quantile(0.99, sum(rate(scheduler_binding_latency_microseconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.99'
+ record: cluster_quantile:scheduler_binding_latency:histogram_quantile
+ - expr: histogram_quantile(0.9, sum(rate(scheduler_e2e_scheduling_latency_microseconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.9'
+ record: cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile
+ - expr: histogram_quantile(0.9, sum(rate(scheduler_scheduling_algorithm_latency_microseconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.9'
+ record: cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile
+ - expr: histogram_quantile(0.9, sum(rate(scheduler_binding_latency_microseconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.9'
+ record: cluster_quantile:scheduler_binding_latency:histogram_quantile
+ - expr: histogram_quantile(0.5, sum(rate(scheduler_e2e_scheduling_latency_microseconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.5'
+ record: cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile
+ - expr: histogram_quantile(0.5, sum(rate(scheduler_scheduling_algorithm_latency_microseconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.5'
+ record: cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile
+ - expr: histogram_quantile(0.5, sum(rate(scheduler_binding_latency_microseconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) / 1e+06
+ labels:
+ quantile: '0.5'
+ record: cluster_quantile:scheduler_binding_latency:histogram_quantile
+{{- end }} \ No newline at end of file
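Several groups are additionally tied to their scrape target being enabled: this one renders only when .Values.kubeScheduler.enabled is true, just as kube-apiserver.rules depends on kubeApiServer.enabled. A sketch of a values override for a cluster where the scheduler and controller-manager endpoints cannot be scraped, which also drops their *Down alerts in kubernetes-absent below, might be:

kubeScheduler:
  enabled: false          # removes kube-scheduler.rules and KubeSchedulerDown
kubeControllerManager:
  enabled: false          # removes KubeControllerManagerDown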
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-absent.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-absent.yaml
new file mode 100644
index 00000000..19b09491
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-absent.yaml
@@ -0,0 +1,123 @@
+# Generated from 'kubernetes-absent' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.defaultRules.rules.kubernetesAbsent }}
+{{- $operatorJob := printf "%s-%s" (include "prometheus-operator.fullname" .) "operator" }}
+{{- $prometheusJob := printf "%s-%s" (include "prometheus-operator.fullname" .) "prometheus" }}
+{{- $alertmanagerJob := printf "%s-%s" (include "prometheus-operator.fullname" .) "alertmanager" }}
+{{- $namespace := .Release.Namespace }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "kubernetes-absent" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: kubernetes-absent
+ rules:
+ - alert: AlertmanagerDown
+ annotations:
+ message: Alertmanager has disappeared from Prometheus target discovery.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-alertmanagerdown
+ expr: absent(up{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"} == 1)
+ for: 15m
+ labels:
+ severity: critical
+{{- if .Values.kubeDns.enabled }}
+ - alert: CoreDNSDown
+ annotations:
+ message: CoreDNS has disappeared from Prometheus target discovery.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-corednsdown
+ expr: absent(up{job="kube-dns"} == 1)
+ for: 15m
+ labels:
+ severity: critical
+{{- end }}
+{{- if .Values.kubeApiServer.enabled }}
+ - alert: KubeAPIDown
+ annotations:
+ message: KubeAPI has disappeared from Prometheus target discovery.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapidown
+ expr: absent(up{job="apiserver"} == 1)
+ for: 15m
+ labels:
+ severity: critical
+{{- end }}
+{{- if .Values.kubeControllerManager.enabled }}
+ - alert: KubeControllerManagerDown
+ annotations:
+ message: KubeControllerManager has disappeared from Prometheus target discovery.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontrollermanagerdown
+ expr: absent(up{job="kube-controller-manager"} == 1)
+ for: 15m
+ labels:
+ severity: critical
+{{- end }}
+{{- if .Values.kubeScheduler.enabled }}
+ - alert: KubeSchedulerDown
+ annotations:
+ message: KubeScheduler has disappeared from Prometheus target discovery.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeschedulerdown
+ expr: absent(up{job="kube-scheduler"} == 1)
+ for: 15m
+ labels:
+ severity: critical
+{{- end }}
+{{- if .Values.kubeStateMetrics.enabled }}
+ - alert: KubeStateMetricsDown
+ annotations:
+ message: KubeStateMetrics has disappeared from Prometheus target discovery.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatemetricsdown
+ expr: absent(up{job="kube-state-metrics"} == 1)
+ for: 15m
+ labels:
+ severity: critical
+{{- end }}
+{{- if .Values.prometheusOperator.kubeletService.enabled }}
+ - alert: KubeletDown
+ annotations:
+ message: Kubelet has disappeared from Prometheus target discovery.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletdown
+ expr: absent(up{job="kubelet"} == 1)
+ for: 15m
+ labels:
+ severity: critical
+{{- end }}
+{{- if .Values.nodeExporter.enabled }}
+ - alert: NodeExporterDown
+ annotations:
+ message: NodeExporter has disappeared from Prometheus target discovery.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodeexporterdown
+ expr: absent(up{job="node-exporter"} == 1)
+ for: 15m
+ labels:
+ severity: critical
+{{- end }}
+ - alert: PrometheusDown
+ annotations:
+ message: Prometheus has disappeared from Prometheus target discovery.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-prometheusdown
+ expr: absent(up{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} == 1)
+ for: 15m
+ labels:
+ severity: critical
+{{- if .Values.prometheusOperator.enabled }}
+ - alert: PrometheusOperatorDown
+ annotations:
+ message: PrometheusOperator has disappeared from Prometheus target discovery.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-prometheusoperatordown
+ expr: absent(up{job="{{ $operatorJob }}",namespace="{{ $namespace }}"} == 1)
+ for: 15m
+ labels:
+ severity: critical
+{{- end }}
+{{- end }} \ No newline at end of file
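The job and namespace selectors in the self-monitoring alerts above are fixed at template time: $operatorJob, $prometheusJob and $alertmanagerJob are the release fullname plus a component suffix, and $namespace is the release namespace. Assuming, purely for illustration, a release whose computed fullname is cp-prometheus-operator installed in namespace edge1, the PrometheusDown rule would render roughly as:

- alert: PrometheusDown
  expr: absent(up{job="cp-prometheus-operator-prometheus",namespace="edge1"} == 1)
  for: 15m

so each alert watches only the components shipped by this release rather than every Prometheus or Alertmanager in the cluster.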
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-apps.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-apps.yaml
new file mode 100644
index 00000000..d3d2c498
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-apps.yaml
@@ -0,0 +1,156 @@
+# Generated from 'kubernetes-apps' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.kubeStateMetrics.enabled .Values.defaultRules.rules.kubernetesApps }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "kubernetes-apps" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: kubernetes-apps
+ rules:
+ - alert: KubePodCrashLooping
+ annotations:
+ message: Pod {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod }}`}} ({{`{{ $labels.container }}`}}) is restarting {{`{{ printf "%.2f" $value }}`}} times / 5 minutes.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodcrashlooping
+ expr: rate(kube_pod_container_status_restarts_total{job="kube-state-metrics"}[15m]) * 60 * 5 > 0
+ for: 1h
+ labels:
+ severity: critical
+ - alert: KubePodNotReady
+ annotations:
+ message: Pod {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod }}`}} has been in a non-ready state for longer than an hour.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodnotready
+ expr: sum by (namespace, pod) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Pending|Unknown"}) > 0
+ for: 1h
+ labels:
+ severity: critical
+ - alert: KubeDeploymentGenerationMismatch
+ annotations:
+ message: Deployment generation for {{`{{ $labels.namespace }}`}}/{{`{{ $labels.deployment }}`}} does not match; this indicates that the Deployment has failed but has not been rolled back.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentgenerationmismatch
+ expr: |-
+ kube_deployment_status_observed_generation{job="kube-state-metrics"}
+ !=
+ kube_deployment_metadata_generation{job="kube-state-metrics"}
+ for: 15m
+ labels:
+ severity: critical
+ - alert: KubeDeploymentReplicasMismatch
+ annotations:
+ message: Deployment {{`{{ $labels.namespace }}`}}/{{`{{ $labels.deployment }}`}} has not matched the expected number of replicas for longer than an hour.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentreplicasmismatch
+ expr: |-
+ kube_deployment_spec_replicas{job="kube-state-metrics"}
+ !=
+ kube_deployment_status_replicas_available{job="kube-state-metrics"}
+ for: 1h
+ labels:
+ severity: critical
+ - alert: KubeStatefulSetReplicasMismatch
+ annotations:
+ message: StatefulSet {{`{{ $labels.namespace }}`}}/{{`{{ $labels.statefulset }}`}} has not matched the expected number of replicas for longer than 15 minutes.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetreplicasmismatch
+ expr: |-
+ kube_statefulset_status_replicas_ready{job="kube-state-metrics"}
+ !=
+ kube_statefulset_status_replicas{job="kube-state-metrics"}
+ for: 15m
+ labels:
+ severity: critical
+ - alert: KubeStatefulSetGenerationMismatch
+ annotations:
+ message: StatefulSet generation for {{`{{ $labels.namespace }}`}}/{{`{{ $labels.statefulset }}`}} does not match; this indicates that the StatefulSet has failed but has not been rolled back.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetgenerationmismatch
+ expr: |-
+ kube_statefulset_status_observed_generation{job="kube-state-metrics"}
+ !=
+ kube_statefulset_metadata_generation{job="kube-state-metrics"}
+ for: 15m
+ labels:
+ severity: critical
+ - alert: KubeStatefulSetUpdateNotRolledOut
+ annotations:
+ message: StatefulSet {{`{{ $labels.namespace }}`}}/{{`{{ $labels.statefulset }}`}} update has not been rolled out.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetupdatenotrolledout
+ expr: |-
+ max without (revision) (
+ kube_statefulset_status_current_revision{job="kube-state-metrics"}
+ unless
+ kube_statefulset_status_update_revision{job="kube-state-metrics"}
+ )
+ *
+ (
+ kube_statefulset_replicas{job="kube-state-metrics"}
+ !=
+ kube_statefulset_status_replicas_updated{job="kube-state-metrics"}
+ )
+ for: 15m
+ labels:
+ severity: critical
+ - alert: KubeDaemonSetRolloutStuck
+ annotations:
+ message: Only {{`{{ $value }}`}}% of the desired Pods of DaemonSet {{`{{ $labels.namespace }}`}}/{{`{{ $labels.daemonset }}`}} are scheduled and ready.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetrolloutstuck
+ expr: |-
+ kube_daemonset_status_number_ready{job="kube-state-metrics"}
+ /
+ kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics"} * 100 < 100
+ for: 15m
+ labels:
+ severity: critical
+ - alert: KubeDaemonSetNotScheduled
+ annotations:
+ message: '{{`{{ $value }}`}} Pods of DaemonSet {{`{{ $labels.namespace }}`}}/{{`{{ $labels.daemonset }}`}} are not scheduled.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetnotscheduled
+ expr: |-
+ kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics"}
+ -
+ kube_daemonset_status_current_number_scheduled{job="kube-state-metrics"} > 0
+ for: 10m
+ labels:
+ severity: warning
+ - alert: KubeDaemonSetMisScheduled
+ annotations:
+ message: '{{`{{ $value }}`}} Pods of DaemonSet {{`{{ $labels.namespace }}`}}/{{`{{ $labels.daemonset }}`}} are running where they are not supposed to run.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetmisscheduled
+ expr: kube_daemonset_status_number_misscheduled{job="kube-state-metrics"} > 0
+ for: 10m
+ labels:
+ severity: warning
+ - alert: KubeCronJobRunning
+ annotations:
+ message: CronJob {{`{{ $labels.namespace }}`}}/{{`{{ $labels.cronjob }}`}} is taking more than 1h to complete.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecronjobrunning
+ expr: time() - kube_cronjob_next_schedule_time{job="kube-state-metrics"} > 3600
+ for: 1h
+ labels:
+ severity: warning
+ - alert: KubeJobCompletion
+ annotations:
+ message: Job {{`{{ $labels.namespace }}`}}/{{`{{ $labels.job_name }}`}} is taking more than one hour to complete.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion
+ expr: kube_job_spec_completions{job="kube-state-metrics"} - kube_job_status_succeeded{job="kube-state-metrics"} > 0
+ for: 1h
+ labels:
+ severity: warning
+ - alert: KubeJobFailed
+ annotations:
+ message: Job {{`{{ $labels.namespace }}`}}/{{`{{ $labels.job_name }}`}} failed to complete.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobfailed
+ expr: kube_job_status_failed{job="kube-state-metrics"} > 0
+ for: 1h
+ labels:
+ severity: warning
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-resources.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-resources.yaml
new file mode 100644
index 00000000..ed4a83c6
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-resources.yaml
@@ -0,0 +1,99 @@
+# Generated from 'kubernetes-resources' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.defaultRules.rules.kubernetesResources }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "kubernetes-resources" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: kubernetes-resources
+ rules:
+ - alert: KubeCPUOvercommit
+ annotations:
+ message: Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuovercommit
+ expr: |-
+ sum(namespace_name:kube_pod_container_resource_requests_cpu_cores:sum)
+ /
+ sum(node:node_num_cpu:sum)
+ >
+ (count(node:node_num_cpu:sum)-1) / count(node:node_num_cpu:sum)
+ for: 5m
+ labels:
+ severity: warning
+ - alert: KubeMemOvercommit
+ annotations:
+ message: Cluster has overcommitted memory resource requests for Pods and cannot tolerate node failure.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubememovercommit
+ expr: |-
+ sum(namespace_name:kube_pod_container_resource_requests_memory_bytes:sum)
+ /
+ sum(node_memory_MemTotal_bytes)
+ >
+ (count(node:node_num_cpu:sum)-1)
+ /
+ count(node:node_num_cpu:sum)
+ for: 5m
+ labels:
+ severity: warning
+ - alert: KubeCPUOvercommit
+ annotations:
+ message: Cluster has overcommitted CPU resource requests for Namespaces.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuovercommit
+ expr: |-
+ sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="requests.cpu"})
+ /
+ sum(node:node_num_cpu:sum)
+ > 1.5
+ for: 5m
+ labels:
+ severity: warning
+ - alert: KubeMemOvercommit
+ annotations:
+ message: Cluster has overcommitted memory resource requests for Namespaces.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubememovercommit
+ expr: |-
+ sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="requests.memory"})
+ /
+ sum(node_memory_MemTotal_bytes{job="node-exporter"})
+ > 1.5
+ for: 5m
+ labels:
+ severity: warning
+ - alert: KubeQuotaExceeded
+ annotations:
+ message: Namespace {{`{{ $labels.namespace }}`}} is using {{`{{ printf "%0.0f" $value }}`}}% of its {{`{{ $labels.resource }}`}} quota.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubequotaexceeded
+ expr: |-
+ 100 * kube_resourcequota{job="kube-state-metrics", type="used"}
+ / ignoring(instance, job, type)
+ (kube_resourcequota{job="kube-state-metrics", type="hard"} > 0)
+ > 90
+ for: 15m
+ labels:
+ severity: warning
+ - alert: CPUThrottlingHigh
+ annotations:
+ message: '{{`{{ printf "%0.0f" $value }}`}}% throttling of CPU in namespace {{`{{ $labels.namespace }}`}} for container {{`{{ $labels.container_name }}`}} in pod {{`{{ $labels.pod_name }}`}}.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh
+ expr: |-
+ 100 * sum(increase(container_cpu_cfs_throttled_periods_total{container_name!="", }[5m])) by (container_name, pod_name, namespace)
+ /
+ sum(increase(container_cpu_cfs_periods_total{}[5m])) by (container_name, pod_name, namespace)
+ > 25
+ for: 15m
+ labels:
+ severity: warning
+{{- end }} \ No newline at end of file
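For intuition on the first KubeCPUOvercommit expression above: (count(node:node_num_cpu:sum) - 1) / count(node:node_num_cpu:sum) is the fraction of total CPU that would remain after losing one node. With four schedulable nodes, for example, the threshold is (4 - 1) / 4 = 0.75, so the alert fires once summed Pod CPU requests exceed 75% of cluster cores, i.e. once the cluster could no longer reschedule every Pod after a single node failure. KubeMemOvercommit applies the same one-node-headroom logic to memory requests.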
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-storage.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-storage.yaml
new file mode 100644
index 00000000..edd8f5fc
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-storage.yaml
@@ -0,0 +1,58 @@
+# Generated from 'kubernetes-storage' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.defaultRules.rules.kubernetesStorage }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "kubernetes-storage" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: kubernetes-storage
+ rules:
+ - alert: KubePersistentVolumeUsageCritical
+ annotations:
+ message: The PersistentVolume claimed by {{`{{ $labels.persistentvolumeclaim }}`}} in Namespace {{`{{ $labels.namespace }}`}} is only {{`{{ printf "%0.2f" $value }}`}}% free.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumeusagecritical
+ expr: |-
+ 100 * kubelet_volume_stats_available_bytes{job="kubelet"}
+ /
+ kubelet_volume_stats_capacity_bytes{job="kubelet"}
+ < 3
+ for: 1m
+ labels:
+ severity: critical
+ - alert: KubePersistentVolumeFullInFourDays
+ annotations:
+ message: Based on recent sampling, the PersistentVolume claimed by {{`{{ $labels.persistentvolumeclaim }}`}} in Namespace {{`{{ $labels.namespace }}`}} is expected to fill up within four days. Currently {{`{{ printf "%0.2f" $value }}`}}% is available.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumefullinfourdays
+ expr: |-
+ 100 * (
+ kubelet_volume_stats_available_bytes{job="kubelet"}
+ /
+ kubelet_volume_stats_capacity_bytes{job="kubelet"}
+ ) < 15
+ and
+ predict_linear(kubelet_volume_stats_available_bytes{job="kubelet"}[6h], 4 * 24 * 3600) < 0
+ for: 5m
+ labels:
+ severity: critical
+ - alert: KubePersistentVolumeErrors
+ annotations:
+ message: The persistent volume {{`{{ $labels.persistentvolume }}`}} has status {{`{{ $labels.phase }}`}}.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumeerrors
+ expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending",job="kube-state-metrics"} > 0
+ for: 5m
+ labels:
+ severity: critical
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-system.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-system.yaml
new file mode 100644
index 00000000..8ccfa5bf
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/kubernetes-system.yaml
@@ -0,0 +1,119 @@
+# Generated from 'kubernetes-system' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.defaultRules.rules.kubernetesSystem }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "kubernetes-system" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: kubernetes-system
+ rules:
+ - alert: KubeNodeNotReady
+ annotations:
+ message: '{{`{{ $labels.node }}`}} has been unready for more than an hour.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubenodenotready
+ expr: kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0
+ for: 1h
+ labels:
+ severity: warning
+ - alert: KubeVersionMismatch
+ annotations:
+ message: There are {{`{{ $value }}`}} different semantic versions of Kubernetes components running.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeversionmismatch
+ expr: count(count by (gitVersion) (label_replace(kubernetes_build_info{job!="kube-dns"},"gitVersion","$1","gitVersion","(v[0-9]*.[0-9]*.[0-9]*).*"))) > 1
+ for: 1h
+ labels:
+ severity: warning
+ - alert: KubeClientErrors
+ annotations:
+ message: Kubernetes API server client '{{`{{ $labels.job }}`}}/{{`{{ $labels.instance }}`}}' is experiencing {{`{{ printf "%0.0f" $value }}`}}% errors.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclienterrors
+ expr: |-
+ (sum(rate(rest_client_requests_total{code=~"5.."}[5m])) by (instance, job)
+ /
+ sum(rate(rest_client_requests_total[5m])) by (instance, job))
+ * 100 > 1
+ for: 15m
+ labels:
+ severity: warning
+ - alert: KubeClientErrors
+ annotations:
+ message: Kubernetes API server client '{{`{{ $labels.job }}`}}/{{`{{ $labels.instance }}`}}' is experiencing {{`{{ printf "%0.0f" $value }}`}} errors / second.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclienterrors
+ expr: sum(rate(ksm_scrape_error_total{job="kube-state-metrics"}[5m])) by (instance, job) > 0.1
+ for: 15m
+ labels:
+ severity: warning
+ - alert: KubeletTooManyPods
+ annotations:
+ message: Kubelet {{`{{ $labels.instance }}`}} is running {{`{{ $value }}`}} Pods, close to the limit of 110.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubelettoomanypods
+ expr: kubelet_running_pod_count{job="kubelet"} > 110 * 0.9
+ for: 15m
+ labels:
+ severity: warning
+ - alert: KubeAPILatencyHigh
+ annotations:
+ message: The API server has a 99th percentile latency of {{`{{ $value }}`}} seconds for {{`{{ $labels.verb }}`}} {{`{{ $labels.resource }}`}}.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapilatencyhigh
+ expr: cluster_quantile:apiserver_request_latencies:histogram_quantile{job="apiserver",quantile="0.99",subresource!="log",verb!~"^(?:LIST|WATCH|WATCHLIST|PROXY|CONNECT)$"} > 1
+ for: 10m
+ labels:
+ severity: warning
+ - alert: KubeAPILatencyHigh
+ annotations:
+ message: The API server has a 99th percentile latency of {{`{{ $value }}`}} seconds for {{`{{ $labels.verb }}`}} {{`{{ $labels.resource }}`}}.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapilatencyhigh
+ expr: cluster_quantile:apiserver_request_latencies:histogram_quantile{job="apiserver",quantile="0.99",subresource!="log",verb!~"^(?:LIST|WATCH|WATCHLIST|PROXY|CONNECT)$"} > 4
+ for: 10m
+ labels:
+ severity: critical
+ - alert: KubeAPIErrorsHigh
+ annotations:
+ message: API server is returning errors for {{`{{ $value }}`}}% of requests.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapierrorshigh
+ expr: |-
+ sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[5m])) without(instance, pod)
+ /
+ sum(rate(apiserver_request_count{job="apiserver"}[5m])) without(instance, pod) * 100 > 10
+ for: 10m
+ labels:
+ severity: critical
+ - alert: KubeAPIErrorsHigh
+ annotations:
+ message: API server is returning errors for {{`{{ $value }}`}}% of requests.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapierrorshigh
+ expr: |-
+ sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[5m])) without(instance, pod)
+ /
+ sum(rate(apiserver_request_count{job="apiserver"}[5m])) without(instance, pod) * 100 > 5
+ for: 10m
+ labels:
+ severity: warning
+ - alert: KubeClientCertificateExpiration
+ annotations:
+ message: A client certificate used to authenticate to the apiserver is expiring in less than 7 days.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclientcertificateexpiration
+ expr: histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800
+ labels:
+ severity: warning
+ - alert: KubeClientCertificateExpiration
+ annotations:
+ message: A client certificate used to authenticate to the apiserver is expiring in less than 24 hours.
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclientcertificateexpiration
+ expr: histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400
+ labels:
+ severity: critical
+{{- end }} \ No newline at end of file
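The two KubeClientCertificateExpiration thresholds are plain second counts that match their messages: 7 * 24 * 3600 = 604800 seconds (seven days, warning) and 24 * 3600 = 86400 seconds (one day, critical). histogram_quantile(0.01, ...) takes the 1st percentile of the observed remaining certificate lifetimes, so the alert reacts as soon as even a small share of API clients present certificates that close to expiry.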
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/node.rules.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/node.rules.yaml
new file mode 100644
index 00000000..35245437
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/node.rules.yaml
@@ -0,0 +1,198 @@
+# Generated from 'node.rules' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.nodeExporter.enabled .Values.defaultRules.rules.node }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "node.rules" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: node.rules
+ rules:
+ - expr: sum(min(kube_pod_info) by (node))
+ record: ':kube_pod_info_node_count:'
+ - expr: max(label_replace(kube_pod_info{job="kube-state-metrics"}, "pod", "$1", "pod", "(.*)")) by (node, namespace, pod)
+ record: 'node_namespace_pod:kube_pod_info:'
+ - expr: |-
+ count by (node) (sum by (node, cpu) (
+ node_cpu_seconds_total{job="node-exporter"}
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:
+ ))
+ record: node:node_num_cpu:sum
+ - expr: 1 - avg(rate(node_cpu_seconds_total{job="node-exporter",mode="idle"}[1m]))
+ record: :node_cpu_utilisation:avg1m
+ - expr: |-
+ 1 - avg by (node) (
+ rate(node_cpu_seconds_total{job="node-exporter",mode="idle"}[1m])
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:)
+ record: node:node_cpu_utilisation:avg1m
+ - expr: |-
+ node:node_cpu_utilisation:avg1m
+ *
+ node:node_num_cpu:sum
+ /
+ scalar(sum(node:node_num_cpu:sum))
+ record: node:cluster_cpu_utilisation:ratio
+ - expr: |-
+ sum(node_load1{job="node-exporter"})
+ /
+ sum(node:node_num_cpu:sum)
+ record: ':node_cpu_saturation_load1:'
+ - expr: |-
+ sum by (node) (
+ node_load1{job="node-exporter"}
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:
+ )
+ /
+ node:node_num_cpu:sum
+ record: 'node:node_cpu_saturation_load1:'
+ - expr: |-
+ 1 -
+ sum(node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"})
+ /
+ sum(node_memory_MemTotal_bytes{job="node-exporter"})
+ record: ':node_memory_utilisation:'
+ - expr: sum(node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"})
+ record: :node_memory_MemFreeCachedBuffers_bytes:sum
+ - expr: sum(node_memory_MemTotal_bytes{job="node-exporter"})
+ record: :node_memory_MemTotal_bytes:sum
+ - expr: |-
+ sum by (node) (
+ (node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"})
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:
+ )
+ record: node:node_memory_bytes_available:sum
+ - expr: |-
+ sum by (node) (
+ node_memory_MemTotal_bytes{job="node-exporter"}
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:
+ )
+ record: node:node_memory_bytes_total:sum
+ - expr: |-
+ (node:node_memory_bytes_total:sum - node:node_memory_bytes_available:sum)
+ /
+ node:node_memory_bytes_total:sum
+ record: node:node_memory_utilisation:ratio
+ - expr: |-
+ (node:node_memory_bytes_total:sum - node:node_memory_bytes_available:sum)
+ /
+ scalar(sum(node:node_memory_bytes_total:sum))
+ record: node:cluster_memory_utilisation:ratio
+ - expr: |-
+ 1e3 * sum(
+ (rate(node_vmstat_pgpgin{job="node-exporter"}[1m])
+ + rate(node_vmstat_pgpgout{job="node-exporter"}[1m]))
+ )
+ record: :node_memory_swap_io_bytes:sum_rate
+ - expr: |-
+ 1 -
+ sum by (node) (
+ (node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"})
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:
+ )
+ /
+ sum by (node) (
+ node_memory_MemTotal_bytes{job="node-exporter"}
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:
+ )
+ record: 'node:node_memory_utilisation:'
+ - expr: 1 - (node:node_memory_bytes_available:sum / node:node_memory_bytes_total:sum)
+ record: 'node:node_memory_utilisation_2:'
+ - expr: |-
+ 1e3 * sum by (node) (
+ (rate(node_vmstat_pgpgin{job="node-exporter"}[1m])
+ + rate(node_vmstat_pgpgout{job="node-exporter"}[1m]))
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:
+ )
+ record: node:node_memory_swap_io_bytes:sum_rate
+ - expr: avg(irate(node_disk_io_time_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+"}[1m]))
+ record: :node_disk_utilisation:avg_irate
+ - expr: |-
+ avg by (node) (
+ irate(node_disk_io_time_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+"}[1m])
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:
+ )
+ record: node:node_disk_utilisation:avg_irate
+ - expr: avg(irate(node_disk_io_time_weighted_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+"}[1m]) / 1e3)
+ record: :node_disk_saturation:avg_irate
+ - expr: |-
+ avg by (node) (
+ irate(node_disk_io_time_weighted_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+"}[1m]) / 1e3
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:
+ )
+ record: node:node_disk_saturation:avg_irate
+ - expr: |-
+ max by (namespace, pod, device) ((node_filesystem_size_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"}
+ - node_filesystem_avail_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"})
+ / node_filesystem_size_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"})
+ record: 'node:node_filesystem_usage:'
+ - expr: max by (namespace, pod, device) (node_filesystem_avail_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"} / node_filesystem_size_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"})
+ record: 'node:node_filesystem_avail:'
+ - expr: |-
+ sum(irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[1m])) +
+ sum(irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[1m]))
+ record: :node_net_utilisation:sum_irate
+ - expr: |-
+ sum by (node) (
+ (irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[1m]) +
+ irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[1m]))
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:
+ )
+ record: node:node_net_utilisation:sum_irate
+ - expr: |-
+ sum(irate(node_network_receive_drop_total{job="node-exporter",device!~"veth.+"}[1m])) +
+ sum(irate(node_network_transmit_drop_total{job="node-exporter",device!~"veth.+"}[1m]))
+ record: :node_net_saturation:sum_irate
+ - expr: |-
+ sum by (node) (
+ (irate(node_network_receive_drop_total{job="node-exporter",device!~"veth.+"}[1m]) +
+ irate(node_network_transmit_drop_total{job="node-exporter",device!~"veth.+"}[1m]))
+ * on (namespace, pod) group_left(node)
+ node_namespace_pod:kube_pod_info:
+ )
+ record: node:node_net_saturation:sum_irate
+ - expr: |-
+ max(
+ max(
+ kube_pod_info{job="kube-state-metrics", host_ip!=""}
+ ) by (node, host_ip)
+ * on (host_ip) group_right (node)
+ label_replace(
+ (max(node_filesystem_files{job="node-exporter", mountpoint="/"}) by (instance)), "host_ip", "$1", "instance", "(.*):.*"
+ )
+ ) by (node)
+ record: 'node:node_inodes_total:'
+ - expr: |-
+ max(
+ max(
+ kube_pod_info{job="kube-state-metrics", host_ip!=""}
+ ) by (node, host_ip)
+ * on (host_ip) group_right (node)
+ label_replace(
+ (max(node_filesystem_files_free{job="node-exporter", mountpoint="/"}) by (instance)), "host_ip", "$1", "instance", "(.*):.*"
+ )
+ ) by (node)
+ record: 'node:node_inodes_free:'
+{{- end }} \ No newline at end of file
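The node:node_filesystem_usage: and node:node_filesystem_avail: records defined near the end of this group are exactly what the NodeDiskRunningFull alerts in kube-prometheus-node-alerting.rules above consume. Since these generated files are not meant to be edited in place, an earlier disk warning is better shipped as a separate PrometheusRule alongside the chart; a minimal sketch (the name, labels and thresholds below are illustrative, and metadata.labels must match whatever ruleSelector the release's Prometheus uses) could be:

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: node-disk-early-warning        # hypothetical supplemental rule
  labels:
    app: prometheus-operator           # adjust to the release's ruleSelector
spec:
  groups:
  - name: node-disk.extra
    rules:
    - alert: NodeDiskRunningFullEarly
      expr: '(node:node_filesystem_usage: > 0.75) and (predict_linear(node:node_filesystem_avail:[6h], 3600 * 72) < 0)'
      for: 1h
      labels:
        severity: info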
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/prometheus-operator.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/prometheus-operator.yaml
new file mode 100644
index 00000000..774a540c
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/prometheus-operator.yaml
@@ -0,0 +1,39 @@
+# Generated from 'prometheus-operator' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.defaultRules.rules.prometheusOperator }}
+{{- $operatorJob := printf "%s-%s" (include "prometheus-operator.fullname" .) "operator" }}
+{{- $namespace := .Release.Namespace }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "prometheus-operator" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: prometheus-operator
+ rules:
+ - alert: PrometheusOperatorReconcileErrors
+ annotations:
+ message: Errors while reconciling {{`{{ $labels.controller }}`}} in {{`{{ $labels.namespace }}`}} Namespace.
+ expr: rate(prometheus_operator_reconcile_errors_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) > 0.1
+ for: 10m
+ labels:
+ severity: warning
+ - alert: PrometheusOperatorNodeLookupErrors
+ annotations:
+ message: Errors while reconciling Prometheus in {{`{{ $labels.namespace }}`}} Namespace.
+ expr: rate(prometheus_operator_node_address_lookup_errors_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) > 0.1
+ for: 10m
+ labels:
+ severity: warning
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/prometheus.rules.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/prometheus.rules.yaml
new file mode 100644
index 00000000..3c9e1490
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/rules/prometheus.rules.yaml
@@ -0,0 +1,105 @@
+# Generated from 'prometheus.rules' group from https://raw.githubusercontent.com/coreos/prometheus-operator/master/contrib/kube-prometheus/manifests/prometheus-rules.yaml
+# Do not change in-place! In order to change this file, first read the following link:
+# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
+{{- if and .Values.defaultRules.create .Values.defaultRules.rules.prometheus }}
+{{- $prometheusJob := printf "%s-%s" (include "prometheus-operator.fullname" .) "prometheus" }}
+{{- $namespace := .Release.Namespace }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: PrometheusRule
+metadata:
+ name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "prometheus.rules" | trunc 63 | trimSuffix "-" }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.defaultRules.labels }}
+{{ toYaml .Values.defaultRules.labels | indent 4 }}
+{{- end }}
+{{- if .Values.defaultRules.annotations }}
+ annotations:
+{{ toYaml .Values.defaultRules.annotations | indent 4 }}
+{{- end }}
+spec:
+ groups:
+ - name: prometheus.rules
+ rules:
+ - alert: PrometheusConfigReloadFailed
+ annotations:
+ description: Reloading Prometheus' configuration has failed for {{`{{$labels.namespace}}`}}/{{`{{$labels.pod}}`}}
+ summary: Reloading Prometheus' configuration failed
+ expr: prometheus_config_last_reload_successful{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} == 0
+ for: 10m
+ labels:
+ severity: warning
+ - alert: PrometheusNotificationQueueRunningFull
+ annotations:
+ description: Prometheus' alert notification queue is running full for {{`{{$labels.namespace}}`}}/{{`{{ $labels.pod}}`}}
+ summary: Prometheus' alert notification queue is running full
+ expr: predict_linear(prometheus_notifications_queue_length{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m], 60 * 30) > prometheus_notifications_queue_capacity{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}
+ for: 10m
+ labels:
+ severity: warning
+ - alert: PrometheusErrorSendingAlerts
+ annotations:
+ description: Errors while sending alerts from Prometheus {{`{{$labels.namespace}}`}}/{{`{{ $labels.pod}}`}} to Alertmanager {{`{{$labels.Alertmanager}}`}}
+ summary: Errors while sending alerts from Prometheus
+ expr: rate(prometheus_notifications_errors_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) / rate(prometheus_notifications_sent_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0.01
+ for: 10m
+ labels:
+ severity: warning
+ - alert: PrometheusErrorSendingAlerts
+ annotations:
+ description: Errors while sending alerts from Prometheus {{`{{$labels.namespace}}`}}/{{`{{ $labels.pod}}`}} to Alertmanager {{`{{$labels.Alertmanager}}`}}
+ summary: Errors while sending alerts from Prometheus
+ expr: rate(prometheus_notifications_errors_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) / rate(prometheus_notifications_sent_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0.03
+ for: 10m
+ labels:
+ severity: critical
+ - alert: PrometheusNotConnectedToAlertmanagers
+ annotations:
+ description: Prometheus {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod}}`}} is not connected to any Alertmanagers
+ summary: Prometheus is not connected to any Alertmanagers
+ expr: prometheus_notifications_alertmanagers_discovered{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} < 1
+ for: 10m
+ labels:
+ severity: warning
+ - alert: PrometheusTSDBReloadsFailing
+ annotations:
+ description: '{{`{{$labels.job}}`}} at {{`{{$labels.instance}}`}} had {{`{{$value | humanize}}`}} reload failures over the last two hours.'
+ summary: Prometheus has issues reloading data blocks from disk
+ expr: increase(prometheus_tsdb_reloads_failures_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[2h]) > 0
+ for: 12h
+ labels:
+ severity: warning
+ - alert: PrometheusTSDBCompactionsFailing
+ annotations:
+ description: '{{`{{$labels.job}}`}} at {{`{{$labels.instance}}`}} had {{`{{$value | humanize}}`}} compaction failures over the last four hours.'
+ summary: Prometheus has issues compacting sample blocks
+ expr: increase(prometheus_tsdb_compactions_failed_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[2h]) > 0
+ for: 12h
+ labels:
+ severity: warning
+ - alert: PrometheusTSDBWALCorruptions
+ annotations:
+ description: '{{`{{$labels.job}}`}} at {{`{{$labels.instance}}`}} has a corrupted write-ahead log (WAL).'
+ summary: Prometheus write-ahead log is corrupted
+ expr: tsdb_wal_corruptions_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} > 0
+ for: 4h
+ labels:
+ severity: warning
+ - alert: PrometheusNotIngestingSamples
+ annotations:
+ description: Prometheus {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod}}`}} isn't ingesting samples.
+ summary: Prometheus isn't ingesting samples
+ expr: rate(prometheus_tsdb_head_samples_appended_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) <= 0
+ for: 10m
+ labels:
+ severity: warning
+ - alert: PrometheusTargetScrapesDuplicate
+ annotations:
+ description: '{{`{{$labels.namespace}}`}}/{{`{{$labels.pod}}`}} has many samples rejected due to duplicate timestamps but different values'
+ summary: Prometheus has many samples rejected
+ expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0
+ for: 10m
+ labels:
+ severity: warning
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/service.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/service.yaml
new file mode 100644
index 00000000..fc94f953
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/service.yaml
@@ -0,0 +1,44 @@
+{{- if .Values.prometheus.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+{{- if .Values.prometheus.service.annotations }}
+ annotations:
+{{ toYaml .Values.prometheus.service.annotations | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.prometheus.service.clusterIP }}
+ clusterIP: {{ .Values.prometheus.service.clusterIP }}
+{{- end }}
+{{- if .Values.prometheus.service.externalIPs }}
+ externalIPs:
+{{ toYaml .Values.prometheus.service.externalIPs | indent 4 }}
+{{- end }}
+{{- if .Values.prometheus.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.prometheus.service.loadBalancerIP }}
+{{- end }}
+{{- if .Values.prometheus.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges:
+ {{- range $cidr := .Values.prometheus.service.loadBalancerSourceRanges }}
+ - {{ $cidr }}
+ {{- end }}
+{{- end }}
+ ports:
+ - name: web
+ {{- if eq .Values.prometheus.service.type "NodePort" }}
+ nodePort: {{ .Values.prometheus.service.nodePort }}
+ {{- end }}
+ port: 9090
+ targetPort: {{ .Values.prometheus.service.targetPort }}
+ selector:
+ app: prometheus
+ prometheus: {{ template "prometheus-operator.fullname" . }}-prometheus
+{{- if .Values.prometheus.service.sessionAffinity }}
+ sessionAffinity: {{ .Values.prometheus.service.sessionAffinity }}
+{{- end }}
+ type: "{{ .Values.prometheus.service.type }}"
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/serviceaccount.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/serviceaccount.yaml
new file mode 100644
index 00000000..88df10ad
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.prometheus.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ template "prometheus-operator.prometheus.serviceAccountName" . }}
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+imagePullSecrets:
+{{ toYaml .Values.global.imagePullSecrets | indent 2 }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/servicemonitor.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/servicemonitor.yaml
new file mode 100644
index 00000000..36790450
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/servicemonitor.yaml
@@ -0,0 +1,21 @@
+{{- if and .Values.prometheus.enabled .Values.prometheus.serviceMonitor.selfMonitor }}
+apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+kind: ServiceMonitor
+metadata:
+ name: {{ template "prometheus-operator.fullname" . }}-prometheus
+ labels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+{{ include "prometheus-operator.labels" . | indent 4 }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "prometheus-operator.name" . }}-prometheus
+ release: {{ .Release.Name | quote }}
+ namespaceSelector:
+ matchNames:
+ - {{ .Release.Namespace | quote }}
+ endpoints:
+ - port: web
+ interval: 30s
+ path: "{{ trimSuffix "/" .Values.prometheus.prometheusSpec.routePrefix }}/metrics"
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/servicemonitors.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/servicemonitors.yaml
new file mode 100644
index 00000000..61f3ca3c
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/templates/prometheus/servicemonitors.yaml
@@ -0,0 +1,29 @@
+{{- if and .Values.prometheus.enabled .Values.prometheus.additionalServiceMonitors }}
+apiVersion: v1
+kind: List
+items:
+{{- range .Values.prometheus.additionalServiceMonitors }}
+ - apiVersion: {{ printf "%s/v1" ($.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }}
+ kind: ServiceMonitor
+ metadata:
+ name: {{ .name }}
+ labels:
+ app: {{ template "prometheus-operator.name" $ }}-prometheus
+{{ include "prometheus-operator.labels" $ | indent 8 }}
+ {{- if .additionalLabels }}
+{{ toYaml .additionalLabels | indent 8 }}
+ {{- end }}
+ spec:
+ endpoints:
+{{ toYaml .endpoints | indent 8 }}
+ {{- if .jobLabel }}
+ jobLabel: {{ .jobLabel }}
+ {{- end }}
+ {{- if .namespaceSelector }}
+ namespaceSelector:
+{{ toYaml .namespaceSelector | indent 8 }}
+ {{- end }}
+ selector:
+{{ toYaml .selector | indent 8 }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/values.yaml b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/values.yaml
new file mode 100644
index 00000000..fc0bc243
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/prometheus-operator/values.yaml
@@ -0,0 +1,1148 @@
+# Default values for prometheus-operator.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+## Provide a name in place of prometheus-operator for `app:` labels
+##
+nameOverride: ""
+
+## Provide a name to substitute for the full names of resources
+##
+fullnameOverride: ""
+
+## Labels to apply to all resources
+##
+commonLabels: {}
+# scmhash: abc123
+# myLabel: aakkmd
+
+## Create default rules for monitoring the cluster
+##
+defaultRules:
+ create: true
+ rules:
+ alertmanager: true
+ etcd: true
+ general: true
+ k8s: true
+ kubeApiserver: true
+ kubePrometheusNodeAlerting: true
+ kubePrometheusNodeRecording: true
+ kubeScheduler: true
+ kubernetesAbsent: true
+ kubernetesApps: true
+ kubernetesResources: true
+ kubernetesStorage: true
+ kubernetesSystem: true
+ node: true
+ prometheusOperator: true
+ prometheus: true
+ ## Labels for default rules
+ labels: {}
+ ## Annotations for default rules
+ annotations: {}
+
+## Provide custom recording or alerting rules to be deployed into the cluster.
+##
+additionalPrometheusRules: []
+# - name: my-rule-file
+# groups:
+# - name: my_group
+# rules:
+# - record: my_record
+# expr: 100 * my_record
+
+##
+global:
+ rbac:
+ create: true
+ pspEnabled: true
+
+ ## Reference to one or more secrets to be used when pulling images
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ imagePullSecrets: []
+ # - name: "image-pull-secret"
+
+## Configuration for alertmanager
+## ref: https://prometheus.io/docs/alerting/alertmanager/
+##
+alertmanager:
+
+ ## Deploy alertmanager
+ ##
+ enabled: false
+
+ ## Service account for Alertmanager to use.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ ##
+ serviceAccount:
+ create: true
+ name: ""
+
+ ## Configure pod disruption budgets for Alertmanager
+ ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
+ ## This configuration is immutable once created and will require the PDB to be deleted to be changed
+ ## https://github.com/kubernetes/kubernetes/issues/45398
+ ##
+ podDisruptionBudget:
+ enabled: false
+ minAvailable: 1
+ maxUnavailable: ""
+
+ ## Alertmanager configuration directives
+ ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
+ ## https://prometheus.io/webtools/alerting/routing-tree-editor/
+ ##
+ config:
+ global:
+ resolve_timeout: 5m
+ route:
+ group_by: ['job']
+ group_wait: 30s
+ group_interval: 5m
+ repeat_interval: 12h
+ receiver: 'null'
+ routes:
+ - match:
+ alertname: Watchdog
+ receiver: 'null'
+ receivers:
+ - name: 'null'
+
+ ## Alertmanager template files to format alerts
+ ## ref: https://prometheus.io/docs/alerting/notifications/
+ ## https://prometheus.io/docs/alerting/notification_examples/
+ ##
+ templateFiles: {}
+ #
+ # An example template:
+ # template_1.tmpl: |-
+ # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }}
+ #
+ # {{ define "slack.myorg.text" }}
+ # {{- $root := . -}}
+ # {{ range .Alerts }}
+ # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
+ # *Cluster:* {{ template "cluster" $root }}
+ # *Description:* {{ .Annotations.description }}
+ # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:>
+ # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
+ # *Details:*
+ # {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
+ # {{ end }}
+
+ ingress:
+ enabled: false
+
+ annotations: {}
+
+ labels: {}
+
+ ## Hosts must be provided if Ingress is enabled.
+ ##
+ hosts: []
+ # - alertmanager.domain.com
+
+ ## TLS configuration for Alertmanager Ingress
+ ## Secret must be manually created in the namespace
+ ##
+ tls: []
+ # - secretName: alertmanager-general-tls
+ # hosts:
+ # - alertmanager.example.com
+
+ ## Configuration for Alertmanager service
+ ##
+ service:
+ annotations: {}
+ labels: {}
+ clusterIP: ""
+
+ ## Port to expose on each node
+ ## Only used if service.type is 'NodePort'
+ ##
+ nodePort: 30903
+ ## List of IP addresses at which the Alertmanager service is available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+ loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+ ## Service type
+ ##
+ type: ClusterIP
+
+ ## If true, create a serviceMonitor for alertmanager
+ ##
+ serviceMonitor:
+ selfMonitor: true
+
+ ## Settings affecting alertmanagerSpec
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec
+ ##
+ alertmanagerSpec:
+ ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
+ ## Metadata Labels and Annotations get propagated to the Alertmanager pods.
+ ##
+ podMetadata: {}
+
+ ## Image of Alertmanager
+ ##
+ image:
+ repository: quay.io/prometheus/alertmanager
+ tag: v0.16.1
+
+ ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
+ ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
+ ##
+ secrets: []
+
+ ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
+ ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
+ ##
+ configMaps: []
+
+ ## Log level for Alertmanager to be configured with.
+ ##
+ logLevel: info
+
+ ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
+ ## running cluster equal to the expected size.
+ replicas: 1
+
+ ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
+ ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
+ ##
+ retention: 120h
+
+ ## Storage is the definition of how storage will be used by the Alertmanager instances.
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md
+ ##
+ storage: {}
+ # volumeClaimTemplate:
+ # spec:
+ # storageClassName: gluster
+ # accessModes: ["ReadWriteOnce"]
+ # resources:
+ # requests:
+ # storage: 50Gi
+ # selector: {}
+
+
+ ## The external URL the Alertmanager instances will be available under. This is needed to generate correct URLs, and is required if Alertmanager is not served from the root of a DNS name.
+ ##
+ externalUrl:
+
+ ## The route prefix Alertmanager registers HTTP handlers for. This is useful if, when using ExternalURL, a proxy rewrites the HTTP routes of a request
+ ## while the actual ExternalURL is still correct, but the server serves requests under a different route prefix. For example, for use with kubectl proxy.
+ ##
+ routePrefix: /
+
+ ## If set to true, no actions on the underlying managed objects will be performed, except for delete actions.
+ ##
+ paused: false
+
+ ## Define which Nodes the Pods are scheduled on.
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+
+ ## Define resources requests and limits for single Pods.
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+ ## Pod anti-affinity can prevent the scheduler from placing Alertmanager replicas on the same node.
+ ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
+ ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
+ ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
+ ##
+ podAntiAffinity: ""
+
+ ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
+ ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
+ ##
+ podAntiAffinityTopologyKey: kubernetes.io/hostname
+
+ ## If specified, the pod's tolerations.
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+
+ ## SecurityContext holds pod-level security attributes and common container settings.
+ ## This defaults to non root user with uid 1000 and gid 2000.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ##
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 1000
+ fsGroup: 2000
+
+ ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
+ ## Note this is only for the Alertmanager UI, not the gossip communication.
+ ##
+ listenLocal: false
+
+ ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
+ ##
+ containers: []
+
+ ## Priority class assigned to the Pods
+ ##
+ priorityClassName: ""
+
+ ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
+ ##
+ additionalPeers: []
+
+## Using default values from https://github.com/helm/charts/blob/master/stable/grafana/values.yaml
+##
+grafana:
+ enabled: false
+
+ ## Deploy default dashboards.
+ ##
+ defaultDashboardsEnabled: true
+
+ adminPassword: prom-operator
+
+ ingress:
+ ## If true, Grafana Ingress will be created
+ ##
+ enabled: false
+
+ ## Annotations for Grafana Ingress
+ ##
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+
+ ## Labels to be added to the Ingress
+ ##
+ labels: {}
+
+ ## Hostnames.
+ ## Must be provided if Ingress is enabled.
+ ##
+ # hosts:
+ # - prometheus.domain.com
+ hosts: []
+
+ ## TLS configuration for Grafana Ingress
+ ## Secret must be manually created in the namespace
+ ##
+ tls: []
+ # - secretName: prometheus-general-tls
+ # hosts:
+ # - prometheus.example.com
+
+ sidecar:
+ dashboards:
+ enabled: true
+ label: grafana_dashboard
+ datasources:
+ enabled: true
+ label: grafana_datasource
+
+ extraConfigmapMounts: []
+ # - name: certs-configmap
+ # mountPath: /etc/grafana/ssl/
+ # configMap: certs-configmap
+ # readOnly: true
+
+ ## If true, create a serviceMonitor for grafana
+ ##
+ serviceMonitor:
+ selfMonitor: true
+
+## Component scraping the kube api server
+##
+kubeApiServer:
+ enabled: false
+ tlsConfig:
+ serverName: kubernetes
+ insecureSkipVerify: false
+
+ ## If your API endpoint address is not reachable (as in AKS) you can replace it with the kubernetes service
+ ##
+ relabelings: []
+ # - sourceLabels:
+ # - __meta_kubernetes_namespace
+ # - __meta_kubernetes_service_name
+ # - __meta_kubernetes_endpoint_port_name
+ # action: keep
+ # regex: default;kubernetes;https
+ # - targetLabel: __address__
+ # replacement: kubernetes.default.svc:443
+
+ serviceMonitor:
+ jobLabel: component
+ selector:
+ matchLabels:
+ component: apiserver
+ provider: kubernetes
+
+## Component scraping the kubelet and kubelet-hosted cAdvisor
+##
+kubelet:
+ enabled: false
+ namespace: kube-system
+
+ serviceMonitor:
+ ## Enable scraping the kubelet over https. For requirements to enable this see
+ ## https://github.com/coreos/prometheus-operator/issues/926
+ ##
+ https: false
+ # cAdvisorMetricRelabelings:
+ # - sourceLabels: [__name__, image]
+ # separator: ;
+ # regex: container_([a-z_]+);
+ # replacement: $1
+ # action: drop
+ # - sourceLabels: [__name__]
+ # separator: ;
+ # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
+ # replacement: $1
+ # action: drop
+
+
+## Component scraping the kube controller manager
+##
+kubeControllerManager:
+ enabled: false
+
+ ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
+ ##
+ endpoints: []
+ # - 10.141.4.22
+ # - 10.141.4.23
+ # - 10.141.4.24
+
+ ## If using kubeControllerManager.endpoints only the port and targetPort are used
+ ##
+ service:
+ port: 10252
+ targetPort: 10252
+ selector:
+ k8s-app: kube-controller-manager
+## Component scraping coreDns. Use either this or kubeDns
+##
+coreDns:
+ enabled: false
+ service:
+ port: 9153
+ targetPort: 9153
+ selector:
+ k8s-app: coredns
+
+## Component scraping kubeDns. Use either this or coreDns
+##
+kubeDns:
+ enabled: false
+ service:
+ selector:
+ k8s-app: kube-dns
+## Component scraping etcd
+##
+kubeEtcd:
+ enabled: false
+
+ ## If your etcd is not deployed as a pod, specify IPs it can be found on
+ ##
+ endpoints: []
+ # - 10.141.4.22
+ # - 10.141.4.23
+ # - 10.141.4.24
+
+ ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used
+ ##
+ service:
+ port: 4001
+ targetPort: 4001
+ selector:
+ k8s-app: etcd-server
+
+ ## Configure secure access to the etcd cluster by loading a secret into prometheus and
+ ## specifying security configuration below. For example, with a secret named etcd-client-cert
+ ##
+ ## serviceMonitor:
+ ## scheme: https
+ ## insecureSkipVerify: false
+ ## serverName: localhost
+ ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
+ ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client
+ ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
+ ##
+ serviceMonitor:
+ scheme: http
+ insecureSkipVerify: false
+ serverName: ""
+ caFile: ""
+ certFile: ""
+ keyFile: ""
+
+
+## Component scraping kube scheduler
+##
+kubeScheduler:
+ enabled: false
+
+ ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
+ ##
+ endpoints: []
+ # - 10.141.4.22
+ # - 10.141.4.23
+ # - 10.141.4.24
+
+ ## If using kubeScheduler.endpoints only the port and targetPort are used
+ ##
+ service:
+ port: 10251
+ targetPort: 10251
+ selector:
+ k8s-app: kube-scheduler
+
+## Component scraping kube state metrics
+##
+kubeStateMetrics:
+ enabled: false
+
+## Configuration for kube-state-metrics subchart
+##
+kube-state-metrics:
+ rbac:
+ create: true
+ podSecurityPolicy:
+ enabled: true
+
+## Deploy node exporter as a daemonset to all nodes
+##
+nodeExporter:
+ enabled: false
+
+ ## Use the value configured in prometheus-node-exporter.podLabels
+ ##
+ jobLabel: jobLabel
+
+## Configuration for prometheus-node-exporter subchart
+##
+prometheus-node-exporter:
+ podLabels:
+ ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards
+ ##
+ jobLabel: node-exporter
+ extraArgs:
+ - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
+ - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
+
+## Manages Prometheus and Alertmanager components
+##
+prometheusOperator:
+ enabled: true
+
+ ## Service account for the Prometheus Operator to use.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ ##
+ serviceAccount:
+ create: true
+ name: ""
+
+ ## Configuration for Prometheus operator service
+ ##
+ service:
+ annotations: {}
+ labels: {}
+ clusterIP: ""
+
+ ## Port to expose on each node
+ ## Only used if service.type is 'NodePort'
+ ##
+ nodePort: 30080
+
+
+ ## LoadBalancer IP
+ ## Only used if service.type is "LoadBalancer"
+ ##
+ loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+
+ ## Service type
+ ## NodePort, ClusterIP, LoadBalancer
+ ##
+ type: NodePort
+
+ ## List of IP addresses at which the Prometheus Operator service is available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+
+ ## Deploy CRDs used by Prometheus Operator.
+ ##
+ createCustomResource: true
+
+ ## Customize CRDs API Group
+ crdApiGroup: monitoring.coreos.com
+
+ ## Attempt to clean up CRDs created by Prometheus Operator.
+ ##
+ cleanupCustomResource: true
+
+ ## Labels to add to the operator pod
+ ##
+ podLabels: {}
+
+ ## Assign a PriorityClassName to pods if set
+ # priorityClassName: ""
+
+ ## Define Log Format
+ # Use logfmt (default) or json-formatted logging
+ # logFormat: logfmt
+
+ ## Decrease log verbosity to errors only
+ # logLevel: error
+
+ ## If true, the operator will create and maintain a service for scraping kubelets
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/helm/prometheus-operator/README.md
+ ##
+ kubeletService:
+ enabled: false
+ namespace: kube-system
+
+ ## Create a servicemonitor for the operator
+ ##
+ serviceMonitor:
+ selfMonitor: false
+
+ ## Resource limits & requests
+ ##
+ resources: {}
+ # limits:
+ # cpu: 200m
+ # memory: 200Mi
+ # requests:
+ # cpu: 100m
+ # memory: 100Mi
+
+ ## Define which Nodes the Pods are scheduled on.
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+
+ ## Tolerations for use with node taints
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+
+ ## Assign the prometheus operator to run on specific nodes
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ ##
+ affinity: {}
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchExpressions:
+ # - key: kubernetes.io/e2e-az-name
+ # operator: In
+ # values:
+ # - e2e-az1
+ # - e2e-az2
+
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 65534
+
+ ## Prometheus-operator image
+ ##
+ image:
+ repository: quay.io/coreos/prometheus-operator
+ tag: v0.29.0
+ pullPolicy: IfNotPresent
+
+ ## Configmap-reload image to use for reloading configmaps
+ ##
+ configmapReloadImage:
+ repository: quay.io/coreos/configmap-reload
+ tag: v0.0.1
+
+ ## Prometheus-config-reloader image to use for config and rule reloading
+ ##
+ prometheusConfigReloaderImage:
+ repository: quay.io/coreos/prometheus-config-reloader
+ tag: v0.29.0
+
+ ## Hyperkube image to use when cleaning up
+ ##
+ hyperkubeImage:
+ repository: k8s.gcr.io/hyperkube
+ tag: v1.12.1
+ pullPolicy: IfNotPresent
+
+## Deploy a Prometheus instance
+##
+prometheus:
+
+ enabled: true
+
+ ## DAaaS: Bring up a default Prometheus instance when the Operator comes up.
+ ## Set startup to false to bring up only the Operator.
+ startup: false
+
+ ## Service account for Prometheuses to use.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ ##
+ serviceAccount:
+ create: true
+ name: ""
+
+ ## Configuration for Prometheus service
+ ##
+ service:
+ annotations: {}
+ labels: {}
+ clusterIP: ""
+
+
+ ## To be used with a proxy extraContainer port
+ targetPort: 9090
+
+ ## List of IP addresses at which the Prometheus server service is available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+
+ ## Port to expose on each node
+ ## Only used if service.type is 'NodePort'
+ ##
+# nodePort: 39090
+
+ ## LoadBalancer IP
+ ## Only used if service.type is "LoadBalancer"
+ loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+ ## Service type
+ ##
+ type: NodePort
+
+ sessionAffinity: ""
+
+ rbac:
+ ## Create role bindings in the specified namespaces to allow Prometheus to monitor them.
+ ## A role binding in the release namespace will always be created.
+ ##
+ roleNamespaces:
+ - kube-system
+
+ ## Configure pod disruption budgets for Prometheus
+ ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
+ ## This configuration is immutable once created and will require the PDB to be deleted to be changed
+ ## https://github.com/kubernetes/kubernetes/issues/45398
+ ##
+ podDisruptionBudget:
+ enabled: false
+ minAvailable: 1
+ maxUnavailable: ""
+
+ ingress:
+ enabled: false
+ annotations: {}
+ labels: {}
+
+ ## Hostnames.
+ ## Must be provided if Ingress is enabled.
+ ##
+ # hosts:
+ # - prometheus.domain.com
+ hosts: []
+
+ ## TLS configuration for Prometheus Ingress
+ ## Secret must be manually created in the namespace
+ ##
+ tls: []
+ # - secretName: prometheus-general-tls
+ # hosts:
+ # - prometheus.example.com
+
+ serviceMonitor:
+ selfMonitor: false
+
+ ## Settings affecting prometheusSpec
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+ ##
+ prometheusSpec:
+
+ ## Interval between consecutive scrapes.
+ ##
+ scrapeInterval: ""
+
+ ## Interval between consecutive evaluations.
+ ##
+ evaluationInterval: ""
+
+ ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
+ ##
+ listenLocal: false
+
+ ## Image of Prometheus.
+ ##
+ image:
+ repository: quay.io/prometheus/prometheus
+ tag: v2.7.1
+
+ ## Tolerations for use with node taints
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+
+ ## Alertmanagers to which alerts will be sent
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints
+ ##
+ ## Default configuration will connect to the alertmanager deployed as part of this release
+ ##
+ alertingEndpoints: []
+ # - name: ""
+ # namespace: ""
+ # port: http
+ # scheme: http
+
+ ## External labels to add to any time series or alerts when communicating with external systems
+ ##
+ externalLabels: {}
+
+ ## External URL at which Prometheus will be reachable.
+ ##
+ externalUrl: ""
+
+ ## Define which Nodes the Pods are scheduled on.
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+
+ ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
+ ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
+ ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
+ ## with the new list of secrets.
+ ##
+ secrets: []
+
+ ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
+ ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
+ ##
+ configMaps: []
+
+ ## Namespaces to be selected for PrometheusRules discovery.
+ ## If nil, select own namespace.
+ ## See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
+ ##
+ ruleNamespaceSelector: {}
+
+ ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
+ ## prometheus resource to be created with selectors based on values in the helm deployment,
+ ## which will also match the PrometheusRule resources created
+ ##
+ ruleSelectorNilUsesHelmValues: true
+
+ ## PrometheusRules to be selected for target discovery.
+ ## If {}, select all PrometheusRules
+ ##
+ ruleSelector: {}
+ ## Example which select all prometheusrules resources
+ ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
+ # ruleSelector:
+ # matchExpressions:
+ # - key: prometheus
+ # operator: In
+ # values:
+ # - example-rules
+ # - example-rules-2
+ #
+ ## Example which select all prometheusrules resources with label "role" set to "example-rules"
+ # ruleSelector:
+ # matchLabels:
+ # role: example-rules
+
+ ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
+ ## prometheus resource to be created with selectors based on values in the helm deployment,
+ ## which will also match the servicemonitors created
+ ##
+ serviceMonitorSelectorNilUsesHelmValues: true
+
+ ## ServiceMonitors to be selected for target discovery.
+ ## If {}, select all ServiceMonitors
+ ##
+ serviceMonitorSelector: {}
+ ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
+ # serviceMonitorSelector:
+ # matchLabels:
+ # prometheus: somelabel
+
+ ## Namespaces to be selected for ServiceMonitor discovery.
+ ## See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
+ ##
+ serviceMonitorNamespaceSelector: {}
+
+ ## How long to retain metrics
+ ##
+ retention: 10d
+
+ ## If true, the Operator won't process any Prometheus configuration changes
+ ##
+ paused: false
+
+ ## Number of Prometheus replicas desired
+ ##
+ replicas: 1
+
+ ## Log level for Prometheus to be configured with
+ ##
+ logLevel: info
+
+ ## Prefix used to register routes, overriding externalUrl route.
+ ## Useful for proxies that rewrite URLs.
+ ##
+ routePrefix: /
+
+ ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
+ ## Metadata Labels and Annotations get propagated to the Prometheus pods.
+ ##
+ podMetadata: {}
+ # labels:
+ # app: prometheus
+ # k8s-app: prometheus
+
+ ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
+ ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
+ ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
+ ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
+ podAntiAffinity: ""
+
+ ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
+ ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
+ ##
+ podAntiAffinityTopologyKey: kubernetes.io/hostname
+
+ ## The remote_read spec configuration for Prometheus.
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
+ remoteRead: {}
+ # - url: http://remote1/read
+
+ ## The remote_write spec configuration for Prometheus.
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
+ remoteWrite: {}
+ # remoteWrite:
+ # - url: http://remote1/push
+
+ ## Resource limits & requests
+ ##
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+ ## Prometheus StorageSpec for persistent data
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md
+ ##
+ storageSpec: {}
+ # volumeClaimTemplate:
+ # spec:
+ # storageClassName: gluster
+ # accessModes: ["ReadWriteOnce"]
+ # resources:
+ # requests:
+ # storage: 50Gi
+ # selector: {}
+
+ ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
+ ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
+ ## as specified in the official Prometheus documentation:
+ ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>. As scrape configs are
+ ## appended, the user is responsible for making sure they are valid. Note that using this feature may make it possible
+ ## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible
+ ## scrape configs are going to break Prometheus after the upgrade.
+ ##
+ ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
+ ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
+ ##
+ additionalScrapeConfigs: []
+ # - job_name: kube-etcd
+ # kubernetes_sd_configs:
+ # - role: node
+ # scheme: https
+ # tls_config:
+ # ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
+ # cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
+ # key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
+ # relabel_configs:
+ # - action: labelmap
+ # regex: __meta_kubernetes_node_label_(.+)
+ # - source_labels: [__address__]
+ # action: replace
+ # target_label: __address__
+ # regex: ([^:;]+):(\d+)
+ # replacement: ${1}:2379
+ # - source_labels: [__meta_kubernetes_node_name]
+ # action: keep
+ # regex: .*mst.*
+ # - source_labels: [__meta_kubernetes_node_name]
+ # action: replace
+ # target_label: node
+ # regex: (.*)
+ # replacement: ${1}
+ # metric_relabel_configs:
+ # - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
+ # action: labeldrop
+
+
+ ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
+ ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<alertmanager_config>.
+ ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
+ ## As AlertManager configs are appended, the user is responsible for making sure they are valid. Note that using this
+ ## feature may make it possible to break upgrades of Prometheus. It is advised to review Prometheus release
+ ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
+ ##
+ additionalAlertManagerConfigs: []
+ # - consul_sd_configs:
+ # - server: consul.dev.test:8500
+ # scheme: http
+ # datacenter: dev
+ # tag_separator: ','
+ # services:
+ # - metrics-prometheus-alertmanager
+
+ ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
+ ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
+ ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
+ ## As alert relabel configs are appended, the user is responsible for making sure they are valid. Note that using this feature may make it
+ ## possible to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel
+ ## configs are going to break Prometheus after the upgrade.
+ ##
+ additionalAlertRelabelConfigs: []
+ # - separator: ;
+ # regex: prometheus_replica
+ # replacement: $1
+ # action: labeldrop
+
+ ## SecurityContext holds pod-level security attributes and common container settings.
+ ## This defaults to non root user with uid 1000 and gid 2000.
+ ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md
+ ##
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 1000
+ fsGroup: 2000
+
+ ## Priority class assigned to the Pods
+ ##
+ priorityClassName: ""
+
+ ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
+ ## This section is experimental; it may change significantly, without deprecation notice or backward compatibility, in any release.
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#thanosspec
+ ##
+ thanos: {}
+
+ ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
+ ## If using a proxy extraContainer, update targetPort with the proxy container port.
+ containers: []
+
+ ## Enable additional scrape configs that are managed externally to this chart. Note that Prometheus
+ ## will fail to provision if the correct secret does not exist.
+ ##
+ additionalScrapeConfigsExternal: false
+
+ additionalServiceMonitors: []
+ ## Name of the ServiceMonitor to create
+ ##
+ # - name: ""
+
+ ## Additional labels used for the ServiceMonitorSelector, together with the standard labels from
+ ## the chart
+ ##
+ # additionalLabels: {}
+
+ ## Service label for use in assembling a job name of the form <label value>-<port>
+ ## If no label is specified, the service name is used.
+ ##
+ # jobLabel: ""
+
+ ## Label selector for services to which this ServiceMonitor applies
+ ##
+ # selector: {}
+
+ ## Namespaces from which services are selected
+ ##
+ # namespaceSelector:
+ ## Match any namespace
+ ##
+ # any: false
+
+ ## Explicit list of namespace names to select
+ ##
+ # matchNames: []
+
+ ## Endpoints of the selected service to be monitored
+ ##
+ # endpoints: []
+ ## Name of the endpoint's service port
+ ## Mutually exclusive with targetPort
+ # - port: ""
+
+ ## Name or number of the endpoint's target port
+ ## Mutually exclusive with port
+ # - targetPort: ""
+
+ ## File containing bearer token to be used when scraping targets
+ ##
+ # bearerTokenFile: ""
+
+ ## Interval at which metrics should be scraped
+ ##
+ # interval: 30s
+
+ ## HTTP path to scrape for metrics
+ ##
+ # path: /metrics
+
+ ## HTTP scheme to use for scraping
+ ##
+ # scheme: http
+
+ ## TLS configuration to use when scraping the endpoint
+ ##
+ # tlsConfig:
+
+ ## Path to the CA file
+ ##
+ # caFile: ""
+
+ ## Path to client certificate file
+ ##
+ # certFile: ""
+
+ ## Skip certificate verification
+ ##
+ # insecureSkipVerify: false
+
+ ## Path to client key file
+ ##
+ # keyFile: ""
+
+ ## Server name used to verify host name
+ ##
+ # serverName: ""
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/.helmignore b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/.helmignore
new file mode 100644
index 00000000..b7f6f9f1
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/.helmignore
@@ -0,0 +1 @@
+OWNERS
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/Chart.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/Chart.yaml
new file mode 100644
index 00000000..86d0c3ab
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: sparkoperator
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/README.md b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/README.md
new file mode 100755
index 00000000..ba0f05bc
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/README.md
@@ -0,0 +1,42 @@
+### Helm Chart for Spark Operator
+
+This is the Helm chart for the [Spark-on-Kubernetes Operator](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator).
+
+#### Prerequisites
+
+The Operator requires Kubernetes version 1.8 or above because it relies on garbage collection of custom resources. If customization of driver and executor pods (through mounting custom ConfigMaps and volumes) is desired, then the [Mutating Admission Webhook](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/docs/quick-start-guide.md#using-the-mutating-admission-webhook) needs to be enabled; that webhook only became beta in Kubernetes 1.9, so this feature requires 1.9 or above.
+
+#### Installing the chart
+
+The chart can be installed by running:
+
+```bash
+$ helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator
+$ helm install incubator/sparkoperator --namespace spark-operator
+```
+
+Note that you need to use the `--namespace` flag during `helm install` to specify the namespace in which to install the operator. The namespace does not need to exist beforehand; if it does not, Helm takes care of creating it. Note that this namespace is independent of the namespace where you would like to deploy Spark jobs (i.e. the setting `sparkJobNamespace` shown in the table below); they can be the same namespace or different ones, as in the sketch below.
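+
+A minimal sketch, assuming the hypothetical namespace names `spark-operator` (for the operator itself) and `spark-jobs` (for Spark applications); any existing or new namespaces work the same way:
+
+```bash
+# Install the operator into spark-operator, but point it at spark-jobs for Spark applications
+$ helm install incubator/sparkoperator \
+    --namespace spark-operator \
+    --set sparkJobNamespace=spark-jobs
+```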
+
+#### Configuration
+
+The following table lists the configurable parameters of the Spark operator chart and their default values.
+
+| Parameter | Description | Default |
+| ------------------------- | ------------------------------------------------------------ | -------------------------------------- |
+| `operatorImageName` | The name of the operator image | `gcr.io/spark-operator/spark-operator` |
+| `operatorVersion` | The version of the operator to install | `v2.4.0-v1beta1-latest` |
+| `imagePullPolicy` | Docker image pull policy | `IfNotPresent` |
+| `sparkJobNamespace` | K8s namespace where Spark jobs are to be deployed | `default` |
+| `enableWebhook` | Whether to enable mutating admission webhook | false |
+| `enableMetrics`           | Whether to expose metrics to be scraped by Prometheus        | true                                   |
+| `controllerThreads` | Number of worker threads used by the SparkApplication controller | 10 |
+| `ingressUrlFormat` | Ingress URL format | "" |
+| `installCrds` | Whether to install CRDs | true |
+| `metricsPort` | Port for the metrics endpoint | 10254 |
+| `metricsEndpoint` | Metrics endpoint | "/metrics" |
+| `metricsPrefix` | Prefix for the metrics | "" |
+| `resyncInterval` | Informer resync interval in seconds | 30 |
+| `webhookPort` | Service port of the webhook server | 8080 |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
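+
+For example, the following (the chosen values are illustrative only) enables the mutating admission webhook and sets the metrics port explicitly:
+
+```bash
+$ helm install incubator/sparkoperator \
+    --namespace spark-operator \
+    --set enableWebhook=true,metricsPort=10254
+```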
+
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/_helpers.tpl b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/_helpers.tpl
new file mode 100644
index 00000000..741b500d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/_helpers.tpl
@@ -0,0 +1,48 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "sparkoperator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "sparkoperator.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "sparkoperator.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "sparkoperator.serviceAccountName" -}}
+{{- if .Values.serviceAccounts.sparkoperator.create -}}
+ {{ default (include "sparkoperator.fullname" .) .Values.serviceAccounts.sparkoperator.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccounts.sparkoperator.name }}
+{{- end -}}
+{{- end -}}
+{{- define "spark.serviceAccountName" -}}
+{{- if .Values.serviceAccounts.spark.create -}}
+ {{ $sparkServiceaccount := printf "%s-%s" .Release.Name "spark" }}
+ {{ default $sparkServiceaccount .Values.serviceAccounts.spark.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccounts.spark.name }}
+{{- end -}}
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-deployment.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-deployment.yaml
new file mode 100755
index 00000000..fdfc51a2
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-deployment.yaml
@@ -0,0 +1,79 @@
+# If the admission webhook is enabled, then a post-install step is required
+# to generate and install the secret in the operator namespace.
+
+# In the post-install hook, the token corresponding to the operator service account
+# is used to authenticate with the Kubernetes API server to install the secret bundle.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "sparkoperator.fullname" . }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ app.kubernetes.io/version: {{ .Values.operatorVersion }}
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ {{- if .Values.enableMetrics }}
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ .Values.metricsPort }}"
+ prometheus.io/path: {{ .Values.metricsEndpoint }}
+ {{- end }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ app.kubernetes.io/version: {{ .Values.operatorVersion }}
+ initializers:
+ pending: []
+ spec:
+ serviceAccountName: {{ include "sparkoperator.serviceAccountName" . }}
+ {{- if .Values.enableWebhook }}
+ volumes:
+ - name: webhook-certs
+ secret:
+ secretName: spark-webhook-certs
+ {{- end }}
+ containers:
+ - name: sparkoperator
+ image: {{ .Values.operatorImageName }}:{{ .Values.operatorVersion }}
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ {{- if .Values.enableWebhook }}
+ volumeMounts:
+ - name: webhook-certs
+ mountPath: /etc/webhook-certs
+ {{- end }}
+ {{- if .Values.enableMetrics }}
+ ports:
+ - containerPort: {{ .Values.metricsPort }}
+ {{ end }}
+ args:
+ - -v=2
+ - -namespace={{ .Values.sparkJobNamespace }}
+ - -ingress-url-format={{ .Values.ingressUrlFormat }}
+ - -install-crds={{ .Values.installCrds }}
+ - -controller-threads={{ .Values.controllerThreads }}
+ - -resync-interval={{ .Values.resyncInterval }}
+ - -logtostderr
+ {{- if .Values.enableMetrics }}
+ - -enable-metrics=true
+ - -metrics-labels=app_type
+ - -metrics-port={{ .Values.metricsPort }}
+ - -metrics-endpoint={{ .Values.metricsEndpoint }}
+ - -metrics-prefix={{ .Values.metricsPrefix }}
+ {{- end }}
+ {{- if .Values.enableWebhook }}
+ - -enable-webhook=true
+ - -webhook-svc-namespace={{ .Release.Namespace }}
+ - -webhook-port={{ .Values.webhookPort }}
+ - -webhook-svc-name={{ .Release.Name }}-webhook
+ - -webhook-config-name={{ include "sparkoperator.fullname" . }}-webhook-config
+ {{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-rbac.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-rbac.yaml
new file mode 100755
index 00000000..bd5fd3fe
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-rbac.yaml
@@ -0,0 +1,55 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "sparkoperator.fullname" . }}-cr
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["services", "configmaps", "secrets"]
+ verbs: ["create", "get", "delete"]
+- apiGroups: ["extensions"]
+ resources: ["ingresses"]
+ verbs: ["create", "get", "delete"]
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["create", "get", "update", "delete"]
+- apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["mutatingwebhookconfigurations"]
+ verbs: ["create", "get", "update", "delete"]
+- apiGroups: ["sparkoperator.k8s.io"]
+ resources: ["sparkapplications", "scheduledsparkapplications"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "sparkoperator.fullname" . }}-crb
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "sparkoperator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: {{ include "sparkoperator.fullname" . }}-cr
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-serviceaccount.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-serviceaccount.yaml
new file mode 100755
index 00000000..5216f8dd
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.serviceAccounts.sparkoperator.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "sparkoperator.serviceAccountName" . }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-rbac.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-rbac.yaml
new file mode 100755
index 00000000..fa066053
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-rbac.yaml
@@ -0,0 +1,44 @@
+{{- if and (.Values.rbac.create) (ne .Values.sparkJobNamespace "") }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ namespace: {{ .Values.sparkJobNamespace }}
+ name: spark-role
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+rules:
+- apiGroups:
+ - "" # "" indicates the core API group
+ resources:
+ - "pods"
+ verbs:
+ - "*"
+- apiGroups:
+ - "" # "" indicates the core API group
+ resources:
+ - "services"
+ verbs:
+ - "*"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: spark-role-binding
+ namespace: {{ .Values.sparkJobNamespace }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+subjects:
+- kind: ServiceAccount
+ name: {{ include "spark.serviceAccountName" . }}
+ namespace: {{ .Values.sparkJobNamespace }}
+roleRef:
+ kind: Role
+ name: spark-role
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-serviceaccount.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-serviceaccount.yaml
new file mode 100755
index 00000000..bb0e55ea
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccounts.spark.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "spark.serviceAccountName" . }}
+ namespace: {{ .Values.sparkJobNamespace }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-cleanup-job.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-cleanup-job.yaml
new file mode 100755
index 00000000..d6d9df7c
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-cleanup-job.yaml
@@ -0,0 +1,32 @@
+{{ if .Values.enableWebhook }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "sparkoperator.fullname" . }}-cleanup
+ annotations:
+ "helm.sh/hook": pre-delete, pre-upgrade
+ "helm.sh/hook-delete-policy": hook-succeeded
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ template:
+ spec:
+ serviceAccountName: {{ include "sparkoperator.serviceAccountName" . }}
+ restartPolicy: OnFailure
+ containers:
+ - name: main
+ image: {{ .Values.operatorImageName }}:{{ .Values.operatorVersion }}
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ command:
+ - "/bin/sh"
+ - "-c"
+ - "curl -ik \
+ -X DELETE \
+ -H \"Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)\" \
+ -H \"Accept: application/json\" \
+ -H \"Content-Type: application/json\" \
+ https://kubernetes.default.svc/api/v1/namespaces/{{ .Release.Namespace }}/secrets/spark-webhook-certs"
+{{ end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-init-job.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-init-job.yaml
new file mode 100755
index 00000000..a42c3097
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-init-job.yaml
@@ -0,0 +1,24 @@
+{{ if .Values.enableWebhook }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "sparkoperator.fullname" . }}-init
+ annotations:
+ "helm.sh/hook": post-install, post-upgrade
+ "helm.sh/hook-delete-policy": hook-succeeded
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ template:
+ spec:
+ serviceAccountName: {{ include "sparkoperator.serviceAccountName" . }}
+ restartPolicy: OnFailure
+ containers:
+ - name: main
+ image: {{ .Values.operatorImageName }}:{{ .Values.operatorVersion }}
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ command: ["/usr/bin/gencerts.sh", "-n", "{{ .Release.Namespace }}", "-s", "{{ .Release.Name }}-webhook", "-p"]
+{{ end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-service.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-service.yaml
new file mode 100755
index 00000000..42c5bc62
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-service.yaml
@@ -0,0 +1,19 @@
+{{ if .Values.enableWebhook }}
+kind: Service
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-webhook
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ ports:
+ - port: 443
+ targetPort: 8080
+ name: webhook
+ selector:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ app.kubernetes.io/version: {{ .Values.operatorVersion }}
+{{ end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/values.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/values.yaml
new file mode 100644
index 00000000..bfb03eab
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/values.yaml
@@ -0,0 +1,28 @@
+operatorImageName: gcr.io/spark-operator/spark-operator
+operatorVersion: v2.4.0-v1beta1-latest
+imagePullPolicy: IfNotPresent
+
+rbac:
+ create: true
+
+serviceAccounts:
+ spark:
+ create: true
+ name:
+ sparkoperator:
+ create: true
+ name:
+
+sparkJobNamespace: ""
+
+enableWebhook: false
+enableMetrics: true
+
+controllerThreads: 10
+ingressUrlFormat: ""
+installCrds: true
+metricsPort: 10254
+metricsEndpoint: "/metrics"
+metricsPrefix: ""
+resyncInterval: 30
+webhookPort: 8080
diff --git a/vnfs/DAaaS/deploy/operator/resources/m3db.labels b/vnfs/DAaaS/deploy/operator/resources/m3db.labels
new file mode 100644
index 00000000..4f1ddd53
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/resources/m3db.labels
@@ -0,0 +1,7 @@
+kubectl label node/otconap6 failure-domain.beta.kubernetes.io/region=us-west1
+kubectl label node/otconap11 failure-domain.beta.kubernetes.io/region=us-west1
+kubectl label node/otccloud02 failure-domain.beta.kubernetes.io/region=us-west1
+
+kubectl label node/otconap6 failure-domain.beta.kubernetes.io/zone=us-west1-a --overwrite=true
+kubectl label node/otconap11 failure-domain.beta.kubernetes.io/zone=us-west1-b --overwrite=true
+kubectl label node/otccloud02 failure-domain.beta.kubernetes.io/zone=us-west1-c --overwrite=true \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/operator/values.yaml b/vnfs/DAaaS/deploy/operator/values.yaml
new file mode 100644
index 00000000..fd98eb36
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/values.yaml
@@ -0,0 +1,29 @@
+# Copyright © 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ nodePortPrefix: 310
+ repository: nexus3.onap.org:10001
+ readinessRepository: oomk8s
+ readinessImage: readiness-check:2.0.0
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+
+#################################################################
+# k8s Operator Day-0 configuration defaults.
+#################################################################
+
diff --git a/vnfs/DAaaS/deploy/training-core/.helmignore b/vnfs/DAaaS/deploy/training-core/.helmignore
new file mode 100644
index 00000000..ef839191
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+*.label*
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/deploy/training-core/Chart.yaml b/vnfs/DAaaS/deploy/training-core/Chart.yaml
new file mode 100644
index 00000000..9057c590
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: Helm chart for training framework components
+name: training-core
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/.gitignore b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/.gitignore
new file mode 100644
index 00000000..fc82fcb5
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/.gitignore
@@ -0,0 +1,2 @@
+tests/bin
+tests/tmp
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/.travis.yml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/.travis.yml
new file mode 100644
index 00000000..1d3351da
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/.travis.yml
@@ -0,0 +1,20 @@
+sudo: required
+
+env:
+ - CASES=_basic.sh
+ - CASES=_basic-subcharts.sh
+ - CASES=_kerberos.sh
+ - CASES=_single-namenode.sh
+
+before_script:
+# Required for K8s v1.10.x. See
+# https://github.com/kubernetes/kubernetes/issues/61058#issuecomment-372764783
+- sudo mount --make-shared / && sudo service docker restart
+- USE_MINIKUBE_DRIVER_NONE=true USE_SUDO_MINIKUBE=true tests/setup.sh
+
+script:
+- tests/run.sh
+
+after_script:
+- tests/cleanup.sh
+- tests/teardown.sh
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/LICENSE b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/README.md b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/README.md
new file mode 100644
index 00000000..ca694a19
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/README.md
@@ -0,0 +1,12 @@
+---
+layout: global
+title: HDFS on Kubernetes
+---
+# HDFS on Kubernetes
+Repository holding helm charts for running Hadoop Distributed File System (HDFS)
+on Kubernetes.
+
+See [charts/README.md](charts/README.md) for how to run the charts.
+
+See [tests/README.md](tests/README.md) for how to run integration tests for
+HDFS on Kubernetes.
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/README.md b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/README.md
new file mode 100644
index 00000000..15ee8867
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/README.md
@@ -0,0 +1,390 @@
+---
+layout: global
+title: HDFS charts
+---
+
+# HDFS charts
+
+Helm charts for launching HDFS daemons in a K8s cluster. The main entry-point
+chart is `hdfs-k8s`, an uber-chart that specifies the other charts as
+dependency subcharts. This means you can launch all HDFS components using
+`hdfs-k8s`.
+
+Note that the HDFS charts are currently of pre-alpha quality. They are also
+being heavily revised and are subject to change.
+
+HDFS on K8s supports the following features:
+  - namenode high availability (HA): HDFS namenode daemons are in charge of
+    maintaining file system metadata concerning which directories contain which
+    files and where the file data are stored. A namenode crash causes a service
+    outage. HDFS can run two namenodes in an active/standby setup; HDFS on K8s
+    supports this HA setup.
+  - K8s persistent volumes (PV) for metadata: A namenode crash will cause a
+    service outage, and losing namenode metadata can mean losing the file
+    system. HDFS on K8s can store the metadata in remote K8s persistent volumes
+    so that the metadata remains intact even if both namenode daemons are lost
+    or restarted.
+  - K8s HostPath volumes for file data: HDFS datanode daemons store the actual
+    file data. File data should also survive datanode crashes or restarts. HDFS
+    on K8s stores the file data on the local disks of the K8s cluster nodes
+    using K8s HostPath volumes. (We plan to switch to a better mechanism, K8s
+    persistent local volumes.)
+  - Kerberos: Vanilla HDFS is not secure. Intruders can easily write custom
+    client code, put a fake user name in requests, and steal data. Production
+    HDFS deployments often secure themselves using Kerberos. HDFS on K8s
+    supports Kerberos.
+
+Here is the list of all charts. (A command to list them as declared subchart
+dependencies is shown right after this list.)
+
+ - hdfs-k8s: main uber-chart. Launches other charts.
+ - hdfs-namenode-k8s: a statefulset and other K8s components for launching HDFS
+ namenode daemons, which maintains file system metadata. The chart supports
+ namenode high availability (HA).
+ - hdfs-datanode-k8s: a daemonset and other K8s components for launching HDFS
+ datanode daemons, which are responsible for storing file data.
+ - hdfs-config-k8s: a configmap containing Hadoop config files for HDFS.
+  - zookeeper: This chart is NOT in this repo. Instead, hdfs-k8s pulls the
+    zookeeper chart from the incubator remote repo
+    (https://kubernetes-charts-incubator.storage.googleapis.com/)
+    as a dependency and launches zookeeper daemons. Zookeeper makes sure
+    only one namenode is active in the HA setup, while the other namenode
+    becomes standby. By default, we will launch three zookeeper servers.
+  - hdfs-journalnode-k8s: a statefulset and other K8s components for launching
+    an HDFS journalnode quorum, which ensures the file system metadata is
+    properly shared between the two namenode daemons in the HA setup.
+    By default, we will launch three journalnode servers.
+ - hdfs-client-k8s: a pod that is configured to run Hadoop client commands
+ for accessing HDFS.
+ - hdfs-krb5-k8s: a size-1 statefulset and other K8s components for launching
+ a Kerberos server, which can be used to secure HDFS. Disabled by default.
+  - hdfs-simple-namenode-k8s: Disabled by default. A simpler setup that
+    launches only a single namenode, i.e. it does not support HA. It does not
+    support Kerberos or persistent volumes either. As it does not support HA,
+    it also needs neither zookeeper nor journalnodes. You may prefer this if
+    you want the simplest possible setup.
+
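+For reference, the subcharts above can also be listed as the declared
+dependencies of `hdfs-k8s`. This is an illustrative command; it assumes you run
+it from the repo root, next to the `charts/` directory:
+
+```
+  $ helm dependency list charts/hdfs-k8s
+```
+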
+# Prerequisite
+
+Requires Kubernetes 1.6+, as the `namenode` and `datanodes` use
+`ClusterFirstWithHostNet`, which was introduced in Kubernetes 1.6.
+
+# Usage
+
+## Basic
+
+The HDFS daemons can be launched using the main `hdfs-k8s` chart. First, build
+the main chart using:
+
+```
+ $ helm repo add incubator \
+ https://kubernetes-charts-incubator.storage.googleapis.com/
+ $ helm dependency build charts/hdfs-k8s
+```
+
+Zookeeper, journalnodes and namenodes need persistent volumes for storing
+metadata. By default, the helm charts do not set a storage class name for
+dynamically provisioned volumes, nor do they use persistent volume selectors for
+static persistent volumes.
+
+This means they rely on the provisioner of the default storage class for
+dynamic volumes. Or, if your cluster has statically provisioned volumes, the
+charts will match existing volumes purely by their size requirements. To
+override this default behavior, you can specify storage classes for
+dynamic volumes, or volume selectors for static volumes. The options are listed
+below, and a sample override is shown right after the list.
+
+  - namenodes: Each of the two namenodes needs at least a 100 GB volume, i.e.
+    you need two 100 GB volumes. This can be overridden by the
+    `hdfs-namenode-k8s.persistence.size` option.
+ You can also override the storage class or the selector using
+ `hdfs-namenode-k8s.persistence.storageClass`, or
+ `hdfs-namenode-k8s.persistence.selector` respectively. For details, see the
+ values.yaml file inside `hdfs-namenode-k8s` chart dir.
+  - zookeeper: You need three > 5 GB volumes, i.e. each of the three zookeeper
+    servers will need at least 5 GB in its volume. This can be overridden by
+    the `zookeeper.persistence.size` option. You can also override
+    the storage class using `zookeeper.persistence.storageClass`.
+ - journalnodes: Each of the three journalnodes will need at least 20 GB in
+ the volume. The size can be overridden by the
+ `hdfs-journalnode-k8s.persistence.size` option.
+ You can also override the storage class or the selector using
+ `hdfs-journalnode-k8s.persistence.storageClass`, or
+ `hdfs-journalnode-k8s.persistence.selector` respectively. For details, see the
+ values.yaml file inside `hdfs-journalnode-k8s` chart dir.
+ - kerberos: The single Kerberos server will need at least 20 GB in the volume.
+ The size can be overridden by the `hdfs-krb5-k8s.persistence.size` option.
+ You can also override the storage class or the selector using
+ `hdfs-krb5-k8s.persistence.storageClass`, or
+ `hdfs-krb5-k8s.persistence.selector` respectively. For details, see the
+ values.yaml file inside `hdfs-krb5-k8s` chart dir.
+
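+For example, storage classes and sizes can be overridden on the `helm install`
+command line shown in the next step. This is only a sketch; the storage class
+name `standard` and the sizes are placeholders for whatever your cluster
+actually provides:
+
+```
+  $ helm install -n my-hdfs charts/hdfs-k8s \
+    --set hdfs-namenode-k8s.persistence.storageClass=standard \
+    --set hdfs-namenode-k8s.persistence.size=100Gi \
+    --set hdfs-journalnode-k8s.persistence.storageClass=standard \
+    --set zookeeper.persistence.storageClass=standard
+```
+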
+Then launch the main chart. Specify the chart release name, say "my-hdfs",
+which will become the prefix of the K8s resource names for the HDFS components.
+
+```
+ $ helm install -n my-hdfs charts/hdfs-k8s
+```
+
+Wait for all daemons to be ready. Note some daemons may restart themselves
+a few times before they become ready.
+
+```
+ $ kubectl get pod -l release=my-hdfs
+
+ NAME READY STATUS RESTARTS AGE
+ my-hdfs-client-c749d9f8f-d5pvk 1/1 Running 0 2m
+ my-hdfs-datanode-o7jia 1/1 Running 3 2m
+ my-hdfs-datanode-p5kch 1/1 Running 3 2m
+ my-hdfs-datanode-r3kjo 1/1 Running 3 2m
+ my-hdfs-journalnode-0 1/1 Running 0 2m
+ my-hdfs-journalnode-1 1/1 Running 0 2m
+ my-hdfs-journalnode-2 1/1 Running 0 1m
+ my-hdfs-namenode-0 1/1 Running 3 2m
+ my-hdfs-namenode-1 1/1 Running 3 2m
+ my-hdfs-zookeeper-0 1/1 Running 0 2m
+ my-hdfs-zookeeper-1 1/1 Running 0 2m
+ my-hdfs-zookeeper-2 1/1 Running 0 2m
+```
+
+Namenodes and datanodes currently use the K8s `hostNetwork` so they can
+see each other's physical IPs. If they were not using `hostNetwork`,
+overlay K8s network providers such as weave-net could mask the physical IPs,
+which would later confuse data locality decisions inside the namenodes.
+
+Finally, test with the client pod:
+
+```
+ $ _CLIENT=$(kubectl get pods -l app=hdfs-client,release=my-hdfs -o name | \
+ cut -d/ -f 2)
+ $ kubectl exec $_CLIENT -- hdfs dfsadmin -report
+ $ kubectl exec $_CLIENT -- hdfs haadmin -getServiceState nn0
+ $ kubectl exec $_CLIENT -- hdfs haadmin -getServiceState nn1
+
+ $ kubectl exec $_CLIENT -- hadoop fs -rm -r -f /tmp
+ $ kubectl exec $_CLIENT -- hadoop fs -mkdir /tmp
+ $ kubectl exec $_CLIENT -- sh -c \
+ "(head -c 100M < /dev/urandom > /tmp/random-100M)"
+ $ kubectl exec $_CLIENT -- hadoop fs -copyFromLocal /tmp/random-100M /tmp
+```
+
+## Kerberos
+
+Kerberos can be enabled by setting a few related options:
+
+```
+ $ helm install -n my-hdfs charts/hdfs-k8s \
+ --set global.kerberosEnabled=true \
+ --set global.kerberosRealm=MYCOMPANY.COM \
+ --set tags.kerberos=true
+```
+
+This will launch all charts including the Kerberos server, which becomes
+ready fairly quickly. However, the HDFS daemon charts will be blocked, as the
+daemons require Kerberos service principals to be available. So we need to
+unblock them by creating those principals.
+
+First, create a configmap containing the common Kerberos config file:
+
+```
+ _MY_DIR=~/krb5
+  mkdir -p $_MY_DIR/tmp
+  _KDC=$(kubectl get pod -l app=hdfs-krb5,release=my-hdfs --no-headers \
+      -o name | cut -d/ -f2)
+  kubectl cp $_KDC:/etc/krb5.conf $_MY_DIR/tmp/krb5.conf
+  kubectl create configmap my-hdfs-krb5-config \
+ --from-file=$_MY_DIR/tmp/krb5.conf
+```
+
+Second, create the service principals and passwords. Kerberos requires service
+principals to be host specific. Some HDFS daemons are associated with your K8s
+cluster nodes' physical host names, say kube-n1.mycompany.com, while others are
+associated with Kubernetes virtual service names, for instance
+my-hdfs-namenode-0.my-hdfs-namenode.default.svc.cluster.local. You can get
+the list of these host names as follows:
+
+```
+ $ _HOSTS=$(kubectl get nodes \
+ -o=jsonpath='{.items[*].status.addresses[?(@.type == "Hostname")].address}')
+
+ $ _HOSTS+=$(kubectl describe configmap my-hdfs-config | \
+ grep -A 1 -e dfs.namenode.rpc-address.hdfs-k8s \
+ -e dfs.namenode.shared.edits.dir |
+ grep "<value>" |
+ sed -e "s/<value>//" \
+ -e "s/<\/value>//" \
+ -e "s/:8020//" \
+ -e "s/qjournal:\/\///" \
+ -e "s/:8485;/ /g" \
+ -e "s/:8485\/hdfs-k8s//")
+```
+
+Then generate per-host principal accounts and password keytab files.
+
+```
+ $ _SECRET_CMD="kubectl create secret generic my-hdfs-krb5-keytabs"
+ $ for _HOST in $_HOSTS; do
+ kubectl exec $_KDC -- kadmin.local -q \
+ "addprinc -randkey hdfs/$_HOST@MYCOMPANY.COM"
+ kubectl exec $_KDC -- kadmin.local -q \
+ "addprinc -randkey HTTP/$_HOST@MYCOMPANY.COM"
+ kubectl exec $_KDC -- kadmin.local -q \
+ "ktadd -norandkey -k /tmp/$_HOST.keytab hdfs/$_HOST@MYCOMPANY.COM HTTP/$_HOST@MYCOMPANY.COM"
+ kubectl cp $_KDC:/tmp/$_HOST.keytab $_MY_DIR/tmp/$_HOST.keytab
+ _SECRET_CMD+=" --from-file=$_MY_DIR/tmp/$_HOST.keytab"
+ done
+```
+
+The loop above builds a command in the shell variable `_SECRET_CMD` for
+creating a K8s secret that contains all the keytab files. Run the command to
+create the secret.
+
+```
+ $ $_SECRET_CMD
+```
+
+This will unblock all HDFS daemon pods. Wait until they become ready.
+
+Finally, test the setup using the following commands:
+
+```
+ $ _NN0=$(kubectl get pods -l app=hdfs-namenode,release=my-hdfs -o name | \
+ head -1 | \
+ cut -d/ -f2)
+ $ kubectl exec $_NN0 -- sh -c "(apt install -y krb5-user > /dev/null)" \
+ || true
+ $ kubectl exec $_NN0 -- \
+ kinit -kt /etc/security/hdfs.keytab \
+ hdfs/my-hdfs-namenode-0.my-hdfs-namenode.default.svc.cluster.local@MYCOMPANY.COM
+ $ kubectl exec $_NN0 -- hdfs dfsadmin -report
+ $ kubectl exec $_NN0 -- hdfs haadmin -getServiceState nn0
+ $ kubectl exec $_NN0 -- hdfs haadmin -getServiceState nn1
+ $ kubectl exec $_NN0 -- hadoop fs -rm -r -f /tmp
+ $ kubectl exec $_NN0 -- hadoop fs -mkdir /tmp
+ $ kubectl exec $_NN0 -- hadoop fs -chmod 0777 /tmp
+ $ kubectl exec $_KDC -- kadmin.local -q \
+ "addprinc -randkey user1@MYCOMPANY.COM"
+ $ kubectl exec $_KDC -- kadmin.local -q \
+ "ktadd -norandkey -k /tmp/user1.keytab user1@MYCOMPANY.COM"
+ $ kubectl cp $_KDC:/tmp/user1.keytab $_MY_DIR/tmp/user1.keytab
+ $ kubectl cp $_MY_DIR/tmp/user1.keytab $_CLIENT:/tmp/user1.keytab
+
+ $ kubectl exec $_CLIENT -- sh -c "(apt install -y krb5-user > /dev/null)" \
+ || true
+
+ $ kubectl exec $_CLIENT -- kinit -kt /tmp/user1.keytab user1@MYCOMPANY.COM
+ $ kubectl exec $_CLIENT -- sh -c \
+ "(head -c 100M < /dev/urandom > /tmp/random-100M)"
+ $ kubectl exec $_CLIENT -- hadoop fs -ls /
+ $ kubectl exec $_CLIENT -- hadoop fs -copyFromLocal /tmp/random-100M /tmp
+```
+
+## Advanced options
+
+### Setting HostPath volume locations for datanodes
+
+HDFS on K8s stores the file data on the local disks of the K8s cluster nodes
+using K8s HostPath volumes. You may want to change the default locations. Set
+`global.dataNodeHostPath` to override the default value. Note that the option
+takes a list, in case you want to use multiple disks.
+
+```
+ $ helm install -n my-hdfs charts/hdfs-k8s \
+ --set "global.dataNodeHostPath={/mnt/sda1/hdfs-data0,/mnt/sda1/hdfs-data1}"
+```
+
+### Using an existing zookeeper quorum
+
+By default, HDFS on K8s pulls in the zookeeper chart from the incubator remote
+repo (https://kubernetes-charts-incubator.storage.googleapis.com/) as a
+dependency and launches zookeeper daemons. But your K8s cluster may already
+have a zookeeper quorum.
+
+It is possible to use the existing zookeeper. We just need to set a few options
+on the helm install command line. It should look something like:
+
+```
+  $ helm install -n my-hdfs charts/hdfs-k8s \
+ --set condition.subchart.zookeeper=false \
+ --set global.zookeeperQuorumOverride=zk-0.zk-svc.default.svc.cluster.local:2181,zk-1.zk-svc.default.svc.cluster.local:2181,zk-2.zk-svc.default.svc.cluster.local:2181
+```
+
+Setting `condition.subchart.zookeeper` to false prevents the uber-chart from
+bringing in zookeeper as a sub-chart, and the `global.zookeeperQuorumOverride`
+option specifies the custom address of your existing zookeeper quorum. Use your
+own zookeeper address here.
+
+### Pinning namenodes to specific K8s cluster nodes
+
+Optionally, you can attach labels to some of your K8s cluster nodes so that
+namenodes will always run on those cluster nodes. This allows HDFS
+clients outside the Kubernetes cluster to rely on stable IP addresses. When used
+by such outside clients, Kerberos expects the namenode addresses to be stable.
+
+```
+ $ kubectl label nodes YOUR-HOST-1 hdfs-namenode-selector=hdfs-namenode
+ $ kubectl label nodes YOUR-HOST-2 hdfs-namenode-selector=hdfs-namenode
+```
+
+You should add the nodeSelector option to the helm chart command:
+
+```
+ $ helm install -n my-hdfs charts/hdfs-k8s \
+ --set hdfs-namenode-k8s.nodeSelector.hdfs-namenode-selector=hdfs-namenode \
+ ...
+```
+
+### Excluding datanodes from some K8s cluster nodes
+
+You may want to exclude some K8s cluster nodes from the datanode launch target.
+For instance, some K8s clusters may let the K8s master node launch
+a datanode. To prevent this, label the cluster nodes with
+`hdfs-datanode-exclude`.
+
+```
+ $ kubectl label node YOUR-CLUSTER-NODE hdfs-datanode-exclude=yes
+```
+
+### Launching with a non-HA namenode
+
+You may want a non-HA namenode, since it is the simplest possible setup.
+Note this won't launch zookeeper or journalnodes.
+
+The single namenode is supposed to be pinned to a cluster host using a node
+label. Attach a label to one of your K8s cluster nodes.
+
+```
+ $ kubectl label nodes YOUR-CLUSTER-NODE hdfs-namenode-selector=hdfs-namenode-0
+```
+
+The non-HA setup does not use persistent volumes, so you don't need to prepare
+any. Instead, it uses a hostPath volume on the pinned cluster node. Just launch
+the chart while setting options to turn off HA. You should add the nodeSelector
+option so that the single namenode finds the hostPath volume of the same cluster
+node when the pod restarts.
+
+```
+ $ helm install -n my-hdfs charts/hdfs-k8s \
+ --set tags.ha=false \
+ --set tags.simple=true \
+ --set global.namenodeHAEnabled=false \
+ --set hdfs-simple-namenode-k8s.nodeSelector.hdfs-namenode-selector=hdfs-namenode-0
+```
+
+# Security
+
+## K8s secret containing Kerberos keytab files
+
+The Kerberos setup creates a K8s secret containing all the keytab files of HDFS
+daemon service principals. This will be mounted onto the HDFS daemon pods. You
+may want to restrict access to this secret using k8s
+[RBAC](https://kubernetes.io/docs/admin/authorization/rbac/), to minimize
+exposure of the keytab files.
+
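+A minimal sketch of such a restriction is below. RBAC is additive, so the idea
+is to grant read access to this one secret narrowly instead of handing out
+blanket secret permissions. The secret name matches the `my-hdfs-krb5-keytabs`
+example above; the `default` namespace and the `hdfs` service account are
+placeholders for your own setup:
+
+```
+  apiVersion: rbac.authorization.k8s.io/v1
+  kind: Role
+  metadata:
+    name: hdfs-keytab-reader
+    namespace: default
+  rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    resourceNames: ["my-hdfs-krb5-keytabs"]
+    verbs: ["get"]
+  ---
+  apiVersion: rbac.authorization.k8s.io/v1
+  kind: RoleBinding
+  metadata:
+    name: hdfs-keytab-reader
+    namespace: default
+  subjects:
+  - kind: ServiceAccount
+    name: hdfs        # placeholder: the service account your HDFS pods run as
+    namespace: default
+  roleRef:
+    kind: Role
+    name: hdfs-keytab-reader
+    apiGroup: rbac.authorization.k8s.io
+```
+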
+## HostPath volumes
+`Datanode` daemons run on every cluster node. They also mount k8s `hostPath`
+local disk volumes. You may want to restrict the use of `hostPath` volumes
+with a `pod security policy`.
+See [reference](https://github.com/kubernetes/examples/blob/master/staging/podsecuritypolicy/rbac/README.md).
+
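+As a rough sketch, a policy for the datanode pods could allow only the volume
+types the daemonset actually uses and pin `hostPath` mounts to the data
+directories. The `/mnt/sda1` prefix below assumes the example
+`global.dataNodeHostPath` shown earlier; the datanode still needs privileged
+mode, `hostNetwork` and `hostPID`, so those remain allowed:
+
+```
+  apiVersion: policy/v1beta1
+  kind: PodSecurityPolicy
+  metadata:
+    name: hdfs-datanode
+  spec:
+    privileged: true          # the datanode container runs privileged
+    hostNetwork: true
+    hostPID: true
+    hostPorts:
+    - min: 1
+      max: 65535
+    volumes:
+    - hostPath
+    - configMap
+    - secret
+    - emptyDir
+    allowedHostPaths:
+    - pathPrefix: /mnt/sda1   # placeholder: your dataNodeHostPath prefix
+    seLinux:
+      rule: RunAsAny
+    runAsUser:
+      rule: RunAsAny
+    supplementalGroups:
+      rule: RunAsAny
+    fsGroup:
+      rule: RunAsAny
+```
+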
+## Credits
+
+Many charts are using public Hadoop docker images hosted by
+[uhopper](https://hub.docker.com/u/uhopper/).
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-client-k8s/Chart.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-client-k8s/Chart.yaml
new file mode 100644
index 00000000..00d6f47d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-client-k8s/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+name: hdfs-client-k8s
+version: 0.1.0
+description: A client for HDFS on Kubernetes.
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-client-k8s/templates/client-deployment.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-client-k8s/templates/client-deployment.yaml
new file mode 100644
index 00000000..afffedfd
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-client-k8s/templates/client-deployment.yaml
@@ -0,0 +1,56 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "hdfs-k8s.client.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.client.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ template "hdfs-k8s.client.name" . }}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: {{ template "hdfs-k8s.client.name" . }}
+ release: {{ .Release.Name }}
+ {{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ containers:
+ - name: hdfs-client
+ image: uhopper/hadoop:2.7.2
+ env:
+ - name: HADOOP_CUSTOM_CONF_DIR
+ value: /etc/hadoop-custom-conf
+ - name: MULTIHOMED_NETWORK
+ value: "0"
+ command: ['/bin/sh', '-c']
+ args:
+ - /entrypoint.sh /usr/bin/tail -f /var/log/dmesg
+ volumeMounts:
+ - name: hdfs-config
+ mountPath: /etc/hadoop-custom-conf
+ readOnly: true
+ {{- if .Values.global.kerberosEnabled }}
+ - name: kerberos-config
+ mountPath: /etc/krb5.conf
+ subPath: {{ .Values.global.kerberosConfigFileName }}
+ readOnly: true
+ {{- end }}
+ restartPolicy: Always
+ volumes:
+ - name: hdfs-config
+ configMap:
+ name: {{ template "hdfs-k8s.config.fullname" . }}
+ {{- if .Values.global.kerberosEnabled }}
+ - name: kerberos-config
+ configMap:
+ name: {{ template "krb5-configmap" . }}
+ {{- end }}
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/.helmignore b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/Chart.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/Chart.yaml
new file mode 100644
index 00000000..229c4344
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for configuring HDFS on Kubernetes
+name: hdfs-config-k8s
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/templates/_helpers.tpl b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/templates/_helpers.tpl
new file mode 100644
index 00000000..cd2ff083
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/templates/_helpers.tpl
@@ -0,0 +1,64 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "hdfs-config-k8s.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "hdfs-config-k8s.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "hdfs-config-k8s.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the kerberos principal suffix for core HDFS services
+*/}}
+{{- define "hdfs-principal" -}}
+{{- printf "hdfs/_HOST@%s" .Values.kerberosRealm -}}
+{{- end -}}
+
+{{/*
+Create the kerberos principal for HTTP services
+*/}}
+{{- define "http-principal" -}}
+{{- printf "HTTP/_HOST@%s" .Values.kerberosRealm -}}
+{{- end -}}
+
+{{/*
+Create the datanode data dir list. The below uses two loops to make sure the
+last item does not have a trailing comma. It emits index 0 as the last item,
+since that is the only special index the helm template language lets us test for.
+*/}}
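+{{/*
+Worked example (illustrative): with two entries in global.dataNodeHostPath, the
+first loop below emits "/hadoop/dfs/data/1," and the second loop emits
+"/hadoop/dfs/data/0", so the rendered value is
+"/hadoop/dfs/data/1,/hadoop/dfs/data/0" with no trailing comma.
+*/}}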
+{{- define "datanode-data-dirs" -}}
+{{- range $index, $path := .Values.global.dataNodeHostPath -}}
+ {{- if ne $index 0 -}}
+ /hadoop/dfs/data/{{ $index }},
+ {{- end -}}
+{{- end -}}
+{{- range $index, $path := .Values.global.dataNodeHostPath -}}
+ {{- if eq $index 0 -}}
+ /hadoop/dfs/data/{{ $index }}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/templates/configmap.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/templates/configmap.yaml
new file mode 100644
index 00000000..379dab8f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-config-k8s/templates/configmap.yaml
@@ -0,0 +1,197 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "hdfs-k8s.config.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.client.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+data:
+ core-site.xml: |
+ <?xml version="1.0"?>
+ <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+ <configuration>
+ {{- if .Values.global.kerberosEnabled }}
+ <property>
+ <name>hadoop.security.authentication</name>
+ <value>kerberos</value>
+ </property>
+ <!--
+ This is service level RPC authorization, which is separate from HDFS file
+ level ACLs. This concerns who can talk to HDFS daemons including
+ datanodes talking to namenode. As part of the authorization, namenode
+    tries to validate that DNS can uniquely translate the datanode IP to the
+ hostname in the datanode Kerberos principal. (i.e. The client IP is what
+ Kerberos has authenticated). This does not work well when both namenode
+ and datanodes are using the Kubernetes HostNetwork and namenode is using
+ the StatefulSet. The same cluster node IP can be mapped to two different
+ DNS names. So we disable this. Again this is only service level RPC
+ authorization and does not affect HDFS file level permission ACLs.
+ -->
+ <property>
+ <name>hadoop.security.authorization</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>hadoop.rpc.protection</name>
+ <value>privacy</value>
+ </property>
+ <property>
+ <name>hadoop.user.group.static.mapping.overrides</name>
+ <value>hdfs=root;</value>
+ </property>
+ {{- end }}
+ {{- range $key, $value := .Values.customHadoopConfig.coreSite }}
+ <property>
+ <name>{{ $key }}</name>
+ <value>{{ $value }}</value>
+ </property>
+ {{- end }}
+ {{- if .Values.global.namenodeHAEnabled }}
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://hdfs-k8s</value>
+ </property>
+ <property>
+ <name>ha.zookeeper.quorum</name>
+ <value>{{ template "zookeeper-quorum" . }}</value>
+ </property>
+ {{- else }}
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://{{ template "namenode-svc-0" . }}:8020</value>
+ </property>
+ {{- end }}
+ </configuration>
+ hdfs-site.xml: |
+ <?xml version="1.0"?>
+ <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+ <configuration>
+ {{- if .Values.global.kerberosEnabled }}
+ <property>
+ <name>dfs.block.access.token.enable</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.encrypt.data.transfer</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.namenode.kerberos.principal</name>
+ <value>{{ template "hdfs-principal" . }}</value>
+ </property>
+ {{/*
+ TODO: Check if the https principal is no longer needed in newer Hadoop version.
+ */}}
+ <property>
+ <name>dfs.namenode.kerberos.https.principal</name>
+ <value>{{ template "http-principal" . }}</value>
+ </property>
+ <property>
+ <name>dfs.web.authentication.kerberos.principal</name>
+ <value>{{ template "http-principal" . }}</value>
+ </property>
+ <property>
+ <name>dfs.namenode.keytab.file</name>
+ <value>/etc/security/hdfs.keytab</value>
+ </property>
+ <property>
+ <name>dfs.journalnode.kerberos.principal</name>
+ <value>{{ template "hdfs-principal" . }}</value>
+ </property>
+ <property>
+ <name>dfs.journalnode.kerberos.internal.spnego.principal</name>
+ <value>{{ template "http-principal" . }}</value>
+ </property>
+ <property>
+ <name>dfs.journalnode.keytab.file</name>
+ <value>/etc/security/hdfs.keytab</value>
+ </property>
+ <property>
+ <name>dfs.datanode.kerberos.principal</name>
+ <value>{{ template "hdfs-principal" . }}</value>
+ </property>
+ <property>
+ <name>dfs.datanode.kerberos.https.principal</name>
+ <value>{{ template "http-principal" . }}</value>
+ </property>
+ <property>
+ <name>dfs.datanode.keytab.file</name>
+ <value>/etc/security/hdfs.keytab</value>
+ </property>
+ {{- if .Values.global.jsvcEnabled }}
+ <property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:1004</value>
+ </property>
+ <property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:1006</value>
+ </property>
+ {{- end }}
+ {{- end }}
+ {{- range $key, $value := .Values.customHadoopConfig.hdfsSite }}
+ <property>
+ <name>{{ $key }}</name>
+ <value>{{ $value }}</value>
+ </property>
+ {{- end }}
+ {{- if .Values.global.namenodeHAEnabled }}
+ <property>
+ <name>dfs.nameservices</name>
+ <value>hdfs-k8s</value>
+ </property>
+ <property>
+ <name>dfs.ha.namenodes.hdfs-k8s</name>
+ <value>nn0,nn1</value>
+ </property>
+ <property>
+ <name>dfs.namenode.rpc-address.hdfs-k8s.nn0</name>
+ <value>{{ template "namenode-svc-0" . }}:8020</value>
+ </property>
+ <property>
+ <name>dfs.namenode.rpc-address.hdfs-k8s.nn1</name>
+ <value>{{ template "namenode-svc-1" . }}:8020</value>
+ </property>
+ <property>
+ <name>dfs.namenode.http-address.hdfs-k8s.nn0</name>
+ <value>{{ template "namenode-svc-0" . }}:50070</value>
+ </property>
+ <property>
+ <name>dfs.namenode.http-address.hdfs-k8s.nn1</name>
+ <value>{{ template "namenode-svc-1" . }}:50070</value>
+ </property>
+ <property>
+ <name>dfs.namenode.shared.edits.dir</name>
+ <value>qjournal://{{ template "journalnode-quorum" . }}/hdfs-k8s</value>
+ </property>
+ <property>
+ <name>dfs.ha.automatic-failover.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.ha.fencing.methods</name>
+ <value>shell(/bin/true)</value>
+ </property>
+ <property>
+ <name>dfs.journalnode.edits.dir</name>
+ <value>/hadoop/dfs/journal</value>
+ </property>
+ <property>
+ <name>dfs.client.failover.proxy.provider.hdfs-k8s</name>
+ <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+ </property>
+ {{- end }}
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <value>file:///hadoop/dfs/name</value>
+ </property>
+ <property>
+ <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>{{ template "datanode-data-dirs" . }}</value>
+ </property>
+ </configuration>
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-datanode-k8s/Chart.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-datanode-k8s/Chart.yaml
new file mode 100644
index 00000000..ec837254
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-datanode-k8s/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+name: hdfs-datanode-k8s
+version: 0.1.0
+description: Datanodes for HDFS on Kubernetes.
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-datanode-k8s/templates/datanode-daemonset.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-datanode-k8s/templates/datanode-daemonset.yaml
new file mode 100644
index 00000000..09445ed0
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-datanode-k8s/templates/datanode-daemonset.yaml
@@ -0,0 +1,191 @@
+# Provides datanode helper scripts.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "hdfs-k8s.datanode.fullname" . }}-scripts
+ labels:
+ app: {{ template "hdfs-k8s.datanode.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+data:
+ check-status.sh: |
+ #!/usr/bin/env bash
+ # Exit on error. Append "|| true" if you expect an error.
+ set -o errexit
+ # Exit on error inside any functions or subshells.
+ set -o errtrace
+ # Do not allow use of undefined vars. Use ${VAR:-} to use an undefined VAR
+ set -o nounset
+ # Catch an error in command pipes. e.g. mysqldump fails (but gzip succeeds)
+ # in `mysqldump |gzip`
+ set -o pipefail
+ # Turn on traces, useful while debugging.
+ set -o xtrace
+
+ # Check if datanode registered with the namenode and got non-null cluster ID.
+ _PORTS="50075 1006"
+ _URL_PATH="jmx?qry=Hadoop:service=DataNode,name=DataNodeInfo"
+ _CLUSTER_ID=""
+ for _PORT in $_PORTS; do
+ _CLUSTER_ID+=$(curl -s http://localhost:${_PORT}/$_URL_PATH | \
+ grep ClusterId) || true
+ done
+ echo $_CLUSTER_ID | grep -q -v null
+---
+# Deleting a daemonset may need some trick. See
+# https://github.com/kubernetes/kubernetes/issues/33245#issuecomment-261250489
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: {{ template "hdfs-k8s.datanode.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.datanode.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+spec:
+ template:
+ metadata:
+ labels:
+ app: {{ template "hdfs-k8s.datanode.name" . }}
+ release: {{ .Release.Name }}
+ {{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- else if .Values.global.defaultAffinityEnabled }}
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: {{ template "hdfs-k8s.datanode.fullname" . }}-exclude
+ operator: DoesNotExist
+ {{- end }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+ {{- end }}
+ hostNetwork: true
+ hostPID: true
+ dnsPolicy: ClusterFirstWithHostNet
+ containers:
+ - name: datanode
+ image: uhopper/hadoop-datanode:2.7.2
+ env:
+ - name: HADOOP_CUSTOM_CONF_DIR
+ value: /etc/hadoop-custom-conf
+ - name: MULTIHOMED_NETWORK
+ value: "0"
+ {{- if and .Values.global.kerberosEnabled .Values.global.jsvcEnabled }}
+ - name: HADOOP_SECURE_DN_USER
+ value: root
+ - name: JSVC_OUTFILE
+ value: /dev/stdout
+ - name: JSVC_ERRFILE
+ value: /dev/stderr
+ - name: JSVC_HOME
+ value: /jsvc-home
+ {{- end }}
+ livenessProbe:
+ exec:
+ command:
+ - /dn-scripts/check-status.sh
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ readinessProbe:
+ exec:
+ command:
+ - /dn-scripts/check-status.sh
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: dn-scripts
+ mountPath: /dn-scripts
+ readOnly: true
+ - name: hdfs-config
+ mountPath: /etc/hadoop-custom-conf
+ readOnly: true
+ {{- range $index, $path := .Values.global.dataNodeHostPath }}
+ - name: hdfs-data-{{ $index }}
+ mountPath: /hadoop/dfs/data/{{ $index }}
+ {{- end }}
+ {{- if .Values.global.kerberosEnabled }}
+ - name: kerberos-config
+ mountPath: /etc/krb5.conf
+ subPath: {{ .Values.global.kerberosConfigFileName }}
+ readOnly: true
+ - name: kerberos-keytab-copy
+ mountPath: /etc/security/
+ readOnly: true
+ {{- if .Values.global.jsvcEnabled }}
+ - name: jsvc-home
+ mountPath: /jsvc-home
+ {{- end }}
+ {{- end }}
+ {{- if .Values.global.kerberosEnabled }}
+ initContainers:
+ - name: copy-kerberos-keytab
+ image: busybox:1.27.1
+ command: ['sh', '-c']
+ args:
+ - cp /kerberos-keytabs/$MY_NODE_NAME.keytab /kerberos-keytab-copy/hdfs.keytab
+ env:
+ - name: MY_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ volumeMounts:
+ - name: kerberos-keytabs
+ mountPath: /kerberos-keytabs
+ - name: kerberos-keytab-copy
+ mountPath: /kerberos-keytab-copy
+ {{- if .Values.global.jsvcEnabled }}
+ - name: copy-jsvc
+ # Pull by digest because the image doesn't have tags to pin.
+ image: mschlimb/jsvc@sha256:bf20eb9a319e9a2f87473d8da7418d21503a97528b932800b6b8417cd31e30ef
+ command: ['sh', '-c']
+ args:
+ - cp /usr/bin/jsvc /jsvc-home/jsvc
+ volumeMounts:
+ - name: jsvc-home
+ mountPath: /jsvc-home
+ {{- end }}
+ {{- end }}
+ restartPolicy: Always
+ volumes:
+ - name: dn-scripts
+ configMap:
+ name: {{ template "hdfs-k8s.datanode.fullname" . }}-scripts
+ defaultMode: 0744
+ {{- range $index, $path := .Values.global.dataNodeHostPath }}
+ - name: hdfs-data-{{ $index }}
+ hostPath:
+ path: {{ $path }}
+ {{- end }}
+ - name: hdfs-config
+ configMap:
+ name: {{ template "hdfs-k8s.config.fullname" . }}
+ {{- if .Values.global.kerberosEnabled }}
+ - name: kerberos-config
+ configMap:
+ name: {{ template "krb5-configmap" . }}
+ - name: kerberos-keytabs
+ secret:
+ secretName: {{ template "krb5-keytabs-secret" . }}
+ - name: kerberos-keytab-copy
+ emptyDir: {}
+ {{- if .Values.global.jsvcEnabled }}
+ - name: jsvc-home
+ emptyDir: {}
+ {{- end }}
+ {{- end }}
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-journalnode-k8s/Chart.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-journalnode-k8s/Chart.yaml
new file mode 100644
index 00000000..a7ea6c8f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-journalnode-k8s/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+name: hdfs-journalnode-k8s
+version: 0.1.0
+description: Journalnode quorum used by HDFS on Kubernetes.
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-journalnode-k8s/templates/journalnode-statefulset.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-journalnode-k8s/templates/journalnode-statefulset.yaml
new file mode 100644
index 00000000..22a4a2b4
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-journalnode-k8s/templates/journalnode-statefulset.yaml
@@ -0,0 +1,180 @@
+# A headless service to create DNS records.
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "hdfs-k8s.journalnode.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.journalnode.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+ annotations:
+ # TODO: Deprecated. Replace tolerate-unready-endpoints with
+ # v1.Service.PublishNotReadyAddresses.
+ service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+ ports:
+ - port: 8485
+ name: jn
+ - port: 8480
+ name: http
+ clusterIP: None
+ selector:
+ app: {{ template "hdfs-k8s.journalnode.name" . }}
+ release: {{ .Release.Name }}
+---
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ template "hdfs-k8s.journalnode.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.journalnode.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "hdfs-k8s.journalnode.name" . }}
+ release: {{ .Release.Name }}
+ minAvailable: {{ div .Values.global.journalnodeQuorumSize 2 | add1 }}
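+  # e.g. with a quorum size of 3 (the default per the chart README) this
+  # renders minAvailable: 2, i.e. a majority of journalnodes must stay up
+  # during voluntary disruptions.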
+---
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+ name: {{ template "hdfs-k8s.journalnode.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.journalnode.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+spec:
+ serviceName: {{ template "hdfs-k8s.journalnode.fullname" . }}
+ replicas: {{ .Values.global.journalnodeQuorumSize }}
+ template:
+ metadata:
+ labels:
+ app: {{ template "hdfs-k8s.journalnode.name" . }}
+ release: {{ .Release.Name }}
+ {{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- else if .Values.global.defaultAffinityEnabled }}
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: "app"
+ operator: In
+ values:
+ - {{ template "hdfs-k8s.journalnode.name" . }}
+ - key: "release"
+ operator: In
+ values:
+ - {{ .Release.Name }}
+ topologyKey: "kubernetes.io/hostname"
+ {{- end }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+ {{- end }}
+ containers:
+ - name: hdfs-journalnode
+ image: uhopper/hadoop-namenode:2.7.2
+ env:
+ - name: HADOOP_CUSTOM_CONF_DIR
+ value: /etc/hadoop-custom-conf
+ command: ["/entrypoint.sh"]
+ args: ["/opt/hadoop-2.7.2/bin/hdfs", "--config", "/etc/hadoop", "journalnode"]
+ ports:
+ - containerPort: 8485
+ name: jn
+ - containerPort: 8480
+ name: http
+ volumeMounts:
+ # Mount a subpath of the volume so that the journal subdir would be
+ # a brand new empty dir. This way, we won't get affected by
+ # existing files in the volume top dir.
+ - name: editdir
+ mountPath: /hadoop/dfs/journal
+ subPath: journal
+ - name: editdir
+ mountPath: /hadoop/dfs/name
+ subPath: name
+ - name: hdfs-config
+ mountPath: /etc/hadoop-custom-conf
+ readOnly: true
+ {{- if .Values.global.kerberosEnabled }}
+ - name: kerberos-config
+ mountPath: /etc/krb5.conf
+ subPath: {{ .Values.global.kerberosConfigFileName }}
+ readOnly: true
+ - name: kerberos-keytab-copy
+ mountPath: /etc/security/
+ readOnly: true
+ {{- end }}
+ {{- if .Values.global.kerberosEnabled }}
+ initContainers:
+ - name: copy-kerberos-keytab
+ image: busybox:1.27.1
+ command: ['sh', '-c']
+ args:
+ - cp /kerberos-keytabs/${MY_KERBEROS_NAME}*.keytab /kerberos-keytab-copy/hdfs.keytab
+ env:
+ - name: MY_KERBEROS_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: kerberos-keytabs
+ mountPath: /kerberos-keytabs
+ - name: kerberos-keytab-copy
+ mountPath: /kerberos-keytab-copy
+ {{- end }}
+ restartPolicy: Always
+ volumes:
+ - name: hdfs-config
+ configMap:
+ name: {{ template "hdfs-k8s.config.fullname" . }}
+ {{- if .Values.global.kerberosEnabled }}
+ - name: kerberos-config
+ configMap:
+ name: {{ template "krb5-configmap" . }}
+ - name: kerberos-keytabs
+ secret:
+ secretName: {{ template "krb5-keytabs-secret" . }}
+ - name: kerberos-keytab-copy
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.global.podSecurityContext.enabled }}
+ securityContext:
+ runAsUser: {{ .Values.global.podSecurityContext.runAsUser }}
+ fsGroup: {{ .Values.global.podSecurityContext.fsGroup }}
+ {{- end }}
+ volumeClaimTemplates:
+ - metadata:
+ name: editdir
+ spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ {{- if .Values.persistence.storageClass }}
+ {{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+ {{- end }}
+ {{- end }}
+ {{- if .Values.persistence.selector }}
+ selector:
+{{ toYaml .Values.persistence.selector | indent 10 }}
+ {{- end }}
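
For illustration only, not part of the patch: the PodDisruptionBudget above computes minAvailable as floor(quorumSize/2)+1, so a strict majority of journalnodes must stay up during voluntary disruptions. A minimal sketch of checking the rendered value, assuming a Helm 2 client, a release named "hdfs", and the umbrella chart directory from this tree:

    # div 5 2 | add1 renders as 3 -- a strict majority of a 5-node quorum.
    helm template ./hdfs-k8s --name hdfs \
      --set global.journalnodeQuorumSize=5 | grep minAvailable
    # Expected to include "minAvailable: 3" for the journalnode PodDisruptionBudget.
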
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/.gitignore b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/.gitignore
new file mode 100644
index 00000000..28ebd32d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/.gitignore
@@ -0,0 +1,2 @@
+charts
+requirements.lock
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/.helmignore b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/Chart.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/Chart.yaml
new file mode 100644
index 00000000..ec58ffb6
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: An entry-point Helm chart for launching HDFS on Kubernetes
+name: hdfs
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/requirements.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/requirements.yaml
new file mode 100644
index 00000000..7f803fdc
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/requirements.yaml
@@ -0,0 +1,59 @@
+dependencies:
+ - name: zookeeper
+ version: "1.0.0"
+ repository: https://kubernetes-charts-incubator.storage.googleapis.com/
+ condition: condition.subchart.zookeeper
+ tags:
+ - ha
+ - kerberos
+ - name: hdfs-config-k8s
+ version: "0.1.0"
+ repository: "file://../hdfs-config-k8s"
+ condition: condition.subchart.config
+ tags:
+ - ha
+ - kerberos
+ - simple
+ - name: hdfs-krb5-k8s
+ version: "0.1.0"
+ repository: "file://../hdfs-krb5-k8s"
+ condition: condition.subchart.kerberos
+ tags:
+ - kerberos
+ - name: hdfs-journalnode-k8s
+ version: "0.1.0"
+ repository: "file://../hdfs-journalnode-k8s"
+ condition: condition.subchart.journalnode
+ tags:
+ - ha
+ - kerberos
+ - name: hdfs-namenode-k8s
+ version: "0.1.0"
+ repository: "file://../hdfs-namenode-k8s"
+ condition: condition.subchart.namenode
+ tags:
+ - ha
+ - kerberos
+ # Non-HA namenode. Disabled by default
+ - name: hdfs-simple-namenode-k8s
+ version: "0.1.0"
+ repository: "file://../hdfs-simple-namenode-k8s"
+ condition: condition.subchart.simple-namenode
+ tags:
+ - simple
+ - name: hdfs-datanode-k8s
+ version: "0.1.0"
+ repository: "file://../hdfs-datanode-k8s"
+ condition: condition.subchart.datanode
+ tags:
+ - ha
+ - kerberos
+ - simple
+ - name: hdfs-client-k8s
+ version: "0.1.0"
+ repository: "file://../hdfs-client-k8s"
+ condition: condition.subchart.client
+ tags:
+ - ha
+ - kerberos
+ - simple
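
The tags and conditions above let whole groups of subcharts be toggled at install time: the ha set is on by default, while kerberos and simple are opt-in (see the hdfs-k8s values.yaml below). A hedged example of enabling the Kerberos-tagged subcharts, assuming a Helm 2 client, a release named "hdfs", and that the umbrella chart is installed from this directory:

    helm install ./hdfs-k8s --name hdfs \
      --set tags.kerberos=true \
      --set global.kerberosEnabled=true \
      --set global.kerberosRealm=EXAMPLE.COM
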
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/templates/_helpers.tpl b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/templates/_helpers.tpl
new file mode 100644
index 00000000..9d03c4d2
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/templates/_helpers.tpl
@@ -0,0 +1,264 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Create a short app name.
+*/}}
+{{- define "hdfs-k8s.name" -}}
+hdfs
+{{- end -}}
+
+{{/*
+Create a fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "hdfs-k8s.fullname" -}}
+{{- if .Values.global.fullnameOverride -}}
+{{- .Values.global.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := include "hdfs-k8s.name" . -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the subchart label.
+*/}}
+{{- define "hdfs-k8s.subchart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "zookeeper-fullname" -}}
+{{- $fullname := include "hdfs-k8s.fullname" . -}}
+{{- if contains "zookeeper" $fullname -}}
+{{- printf "%s" $fullname -}}
+{{- else -}}
+{{- printf "%s-zookeeper" $fullname | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "hdfs-k8s.config.name" -}}
+{{- template "hdfs-k8s.name" . -}}-config
+{{- end -}}
+
+{{- define "hdfs-k8s.config.fullname" -}}
+{{- $fullname := include "hdfs-k8s.fullname" . -}}
+{{- if contains "config" $fullname -}}
+{{- printf "%s" $fullname -}}
+{{- else -}}
+{{- printf "%s-config" $fullname | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "hdfs-k8s.krb5.name" -}}
+{{- template "hdfs-k8s.name" . -}}-krb5
+{{- end -}}
+
+{{- define "hdfs-k8s.krb5.fullname" -}}
+{{- $fullname := include "hdfs-k8s.fullname" . -}}
+{{- if contains "config" $fullname -}}
+{{- printf "%s" $fullname -}}
+{{- else -}}
+{{- printf "%s-krb5" $fullname | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "hdfs-k8s.journalnode.name" -}}
+{{- template "hdfs-k8s.name" . -}}-journalnode
+{{- end -}}
+
+{{- define "hdfs-k8s.journalnode.fullname" -}}
+{{- $fullname := include "hdfs-k8s.fullname" . -}}
+{{- if contains "journalnode" $fullname -}}
+{{- printf "%s" $fullname -}}
+{{- else -}}
+{{- printf "%s-journalnode" $fullname | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "hdfs-k8s.namenode.name" -}}
+{{- template "hdfs-k8s.name" . -}}-namenode
+{{- end -}}
+
+{{- define "hdfs-k8s.namenode.fullname" -}}
+{{- $fullname := include "hdfs-k8s.fullname" . -}}
+{{- if contains "namenode" $fullname -}}
+{{- printf "%s" $fullname -}}
+{{- else -}}
+{{- printf "%s-namenode" $fullname | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "hdfs-k8s.datanode.name" -}}
+{{- template "hdfs-k8s.name" . -}}-datanode
+{{- end -}}
+
+{{- define "hdfs-k8s.datanode.fullname" -}}
+{{- $fullname := include "hdfs-k8s.fullname" . -}}
+{{- if contains "datanode" $fullname -}}
+{{- printf "%s" $fullname -}}
+{{- else -}}
+{{- printf "%s-datanode" $fullname | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "hdfs-k8s.client.name" -}}
+{{- template "hdfs-k8s.name" . -}}-client
+{{- end -}}
+
+{{- define "hdfs-k8s.client.fullname" -}}
+{{- $fullname := include "hdfs-k8s.fullname" . -}}
+{{- if contains "client" $fullname -}}
+{{- printf "%s" $fullname -}}
+{{- else -}}
+{{- printf "%s-client" $fullname | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the kerberos principal suffix for core HDFS services
+*/}}
+{{- define "hdfs-principal" -}}
+{{- printf "hdfs/_HOST@%s" .Values.global.kerberosRealm -}}
+{{- end -}}
+
+{{/*
+Create the kerberos principal for HTTP services
+*/}}
+{{- define "http-principal" -}}
+{{- printf "HTTP/_HOST@%s" .Values.global.kerberosRealm -}}
+{{- end -}}
+
+{{/*
+Create the name for a Kubernetes Configmap containing a Kerberos config file.
+*/}}
+{{- define "krb5-configmap" -}}
+{{- if .Values.global.kerberosConfigMapOverride -}}
+{{- .Values.global.kerberosConfigMapOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := include "hdfs-k8s.krb5.fullname" . -}}
+{{- printf "%s-config" $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name for a Kubernetes Secret containing Kerberos keytabs.
+*/}}
+{{- define "krb5-keytabs-secret" -}}
+{{- if .Values.global.kerberosKeytabsSecretOverride -}}
+{{- .Values.global.kerberosKeytabsSecretOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := include "hdfs-k8s.krb5.fullname" . -}}
+{{- printf "%s-keytabs" $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+
+{{/*
+Create the domain name part of services.
+The HDFS config file should specify the FQDN of services. Otherwise, Kerberos
+login may fail.
+*/}}
+{{- define "svc-domain" -}}
+{{- printf "%s.svc.cluster.local" .Release.Namespace -}}
+{{- end -}}
+
+{{/*
+Create the zookeeper quorum server list. The template below uses two loops to
+make sure the last item does not have a trailing comma. It emits index 0 last
+since that is the only special index that helm templates give us.
+*/}}
+{{- define "zookeeper-quorum" -}}
+{{- if .Values.global.zookeeperQuorumOverride -}}
+{{- .Values.global.zookeeperQuorumOverride -}}
+{{- else -}}
+{{- $service := include "zookeeper-fullname" . -}}
+{{- $domain := include "svc-domain" . -}}
+{{- $replicas := .Values.global.zookeeperQuorumSize | int -}}
+{{- range $i, $e := until $replicas -}}
+ {{- if ne $i 0 -}}
+ {{- printf "%s-%d.%s-headless.%s:2181," $service $i $service $domain -}}
+ {{- end -}}
+{{- end -}}
+{{- range $i, $e := until $replicas -}}
+ {{- if eq $i 0 -}}
+ {{- printf "%s-%d.%s-headless.%s:2181" $service $i $service $domain -}}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Construct the name of the Kerberos KDC pod 0.
+*/}}
+{{- define "krb5-pod-0" -}}
+{{- template "hdfs-k8s.krb5.fullname" . -}}-0
+{{- end -}}
+
+{{/*
+Construct the full name of the Kerberos KDC statefulset member 0.
+*/}}
+{{- define "krb5-svc-0" -}}
+{{- $pod := include "krb5-pod-0" . -}}
+{{- $service := include "hdfs-k8s.krb5.fullname" . -}}
+{{- $domain := include "svc-domain" . -}}
+{{- printf "%s.%s.%s" $pod $service $domain -}}
+{{- end -}}
+
+{{/*
+Create the journalnode quorum server list. The template below uses two loops to
+make sure the last item does not have a trailing delimiter. It emits index 0
+last since that is the only special index that helm templates give us.
+*/}}
+{{- define "journalnode-quorum" -}}
+{{- $service := include "hdfs-k8s.journalnode.fullname" . -}}
+{{- $domain := include "svc-domain" . -}}
+{{- $replicas := .Values.global.journalnodeQuorumSize | int -}}
+{{- range $i, $e := until $replicas -}}
+ {{- if ne $i 0 -}}
+ {{- printf "%s-%d.%s.%s:8485;" $service $i $service $domain -}}
+ {{- end -}}
+{{- end -}}
+{{- range $i, $e := until $replicas -}}
+ {{- if eq $i 0 -}}
+ {{- printf "%s-%d.%s.%s:8485" $service $i $service $domain -}}
+ {{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Construct the name of the namenode pod 0.
+*/}}
+{{- define "namenode-pod-0" -}}
+{{- template "hdfs-k8s.namenode.fullname" . -}}-0
+{{- end -}}
+
+{{/*
+Construct the full name of the namenode statefulset member 0.
+*/}}
+{{- define "namenode-svc-0" -}}
+{{- $pod := include "namenode-pod-0" . -}}
+{{- $service := include "hdfs-k8s.namenode.fullname" . -}}
+{{- $domain := include "svc-domain" . -}}
+{{- printf "%s.%s.%s" $pod $service $domain -}}
+{{- end -}}
+
+{{/*
+Construct the name of the namenode pod 1.
+*/}}
+{{- define "namenode-pod-1" -}}
+{{- template "hdfs-k8s.namenode.fullname" . -}}-1
+{{- end -}}
+
+{{/*
+Construct the full name of the namenode statefulset member 1.
+*/}}
+{{- define "namenode-svc-1" -}}
+{{- $pod := include "namenode-pod-1" . -}}
+{{- $service := include "hdfs-k8s.namenode.fullname" . -}}
+{{- $domain := include "svc-domain" . -}}
+{{- printf "%s.%s.%s" $pod $service $domain -}}
+{{- end -}}
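
For illustration only: with a release named "hdfs" in the "default" namespace and journalnodeQuorumSize left at 3, the "journalnode-quorum" helper above would render roughly as sketched below; the first loop emits indices 1 and 2 with the ";" delimiter and the second loop emits index 0 last, so no trailing delimiter is produced. The "zookeeper-quorum" helper behaves the same way with "," and port 2181.

    # Hypothetical rendered value of {{ include "journalnode-quorum" . }}:
    #   hdfs-journalnode-1.hdfs-journalnode.default.svc.cluster.local:8485;
    #   hdfs-journalnode-2.hdfs-journalnode.default.svc.cluster.local:8485;
    #   hdfs-journalnode-0.hdfs-journalnode.default.svc.cluster.local:8485
    # (emitted as a single line; wrapped here for readability)
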
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/values.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/values.yaml
new file mode 100644
index 00000000..77ca3fe0
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-k8s/values.yaml
@@ -0,0 +1,248 @@
+## ------------------------------------------------------------------------------
+## zookeeper:
+## ------------------------------------------------------------------------------
+zookeeper:
+ ## Configure Zookeeper resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ resources: ~
+
+ ## The JVM heap size to allocate to Zookeeper
+ env:
+ ZK_HEAP_SIZE: 1G
+
+  ## The number of zookeeper servers to have in the quorum.
+ replicaCount: 3
+
+## ------------------------------------------------------------------------------
+## hdfs-config-k8s:
+## ------------------------------------------------------------------------------
+hdfs-config-k8s:
+ ## Custom hadoop config keys passed to the hdfs configmap as extra keys.
+ customHadoopConfig:
+ coreSite: {}
+ ## Set config key and value pairs, e.g.
+ # hadoop.http.authentication.type: kerberos
+
+ hdfsSite: {}
+ ## Set config key and value pairs, e.g.
+ # dfs.datanode.use.datanode.hostname: "false"
+
+## ------------------------------------------------------------------------------
+## hdfs-journalnode-k8s:
+## ------------------------------------------------------------------------------
+hdfs-journalnode-k8s:
+ persistence:
+ ## Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+ ## To choose a suitable persistent volume from available static volumes, selectors
+ ## are used.
+ # selector:
+ # matchLabels:
+ # volume-type: hdfs-ssd
+ accessMode: ReadWriteOnce
+ size: 20Gi
+
+ ## Node labels and tolerations for pod assignment
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+
+## ------------------------------------------------------------------------------
+## hdfs-namenode-k8s:
+## ------------------------------------------------------------------------------
+hdfs-namenode-k8s:
+ ## Name of the namenode start script in the config map.
+ namenodeStartScript: format-and-run.sh
+
+  ## A namenode start script that can have user-specified content.
+  ## Can be used to conduct ad-hoc operations as specified by a user.
+ ## To use this, also set the namenodeStartScript variable above
+ ## to custom-run.sh.
+ customRunScript: |
+ #!/bin/bash -x
+ echo Write your own script content!
+ echo This message will disappear in 10 seconds.
+ sleep 10
+
+ persistence:
+ ## Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+
+ ## To choose a suitable persistent volume from available static volumes, selectors
+ ## are used.
+ # selector:
+ # matchLabels:
+ # volume-type: hdfs-ssd
+
+ accessMode: ReadWriteOnce
+
+ size: 100Gi
+
+ ## Whether or not to use hostNetwork in namenode pods. Disabling this will break
+  ## data locality, as the namenode will see pod virtual IPs and fail to equate them
+  ## with the cluster node physical IPs associated with data nodes.
+ ## We currently disable this only for CI on minikube.
+ hostNetworkEnabled: true
+
+ ## Node labels and tolerations for pod assignment
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+
+## ------------------------------------------------------------------------------
+## hdfs-simple-namenode-k8s:
+## ------------------------------------------------------------------------------
+hdfs-simple-namenode-k8s:
+ ## Path of the local disk directory on a cluster node that will contain the namenode
+ ## fsimage and edit logs. This will be mounted to the namenode as a k8s HostPath
+ ## volume.
+ nameNodeHostPath: /hdfs-name
+
+ ## Node labels and tolerations for pod assignment
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+
+## ------------------------------------------------------------------------------
+## hdfs-datanode-k8s:
+## ------------------------------------------------------------------------------
+hdfs-datanode-k8s:
+ ## Node labels and tolerations for pod assignment
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+
+## ------------------------------------------------------------------------------
+## hdfs-krb5-k8s:
+## ------------------------------------------------------------------------------
+hdfs-krb5-k8s:
+ persistence:
+ ## Persistent Volume Storage Class
+ ## If defined, storageClassName: <storageClass>
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ # storageClass: "-"
+
+ ## To choose a suitable persistent volume from available static volumes, selectors
+ ## are used.
+ # selector:
+ # matchLabels:
+ # volume-type: hdfs-ssd
+
+ accessMode: ReadWriteOnce
+
+ size: 20Gi
+
+ ## We use a 3rd party image built from https://github.com/gcavalcante8808/docker-krb5-server.
+ ## TODO: The pod currently prints out the admin account in plain text.
+ ## Supply an admin account password using a k8s secret.
+ ## TODO: The auto-generated passwords might be weak due to low entropy.
+ ## Increase entropy by running rngd or haveged.
+ ## TODO: Using latest tag is not desirable. The current image does not have specific tags.
+ ## Find a way to fix it.
+ image:
+ repository: gcavalcante8808/krb5-server
+
+ tag: latest
+
+ pullPolicy: IfNotPresent
+
+ service:
+ type: ClusterIP
+
+ port: 88
+## ------------------------------------------------------------------------------
+## Global values affecting all sub-charts:
+## ------------------------------------------------------------------------------
+global:
+ ## A list of the local disk directories on cluster nodes that will contain the datanode
+ ## blocks. These paths will be mounted to the datanode as K8s HostPath volumes.
+ ## In a command line, the list should be enclosed in '{' and '}'.
+ ## e.g. --set "dataNodeHostPath={/hdfs-data,/hdfs-data1}"
+ dataNodeHostPath:
+ - /hdfs-data
+
+ ## Parameters for determining which Unix user and group IDs to use in pods.
+  ## Persistent volume permissions may need to match these.
+ podSecurityContext:
+ enabled: false
+ runAsUser: 0
+ fsGroup: 1000
+
+ ## Whether or not to expect namenodes in the HA setup.
+ namenodeHAEnabled: true
+
+  ## The number of zookeeper servers to have in the quorum.
+ ## This should match zookeeper.replicaCount above. Used only when
+ ## namenodeHAEnabled is set.
+ zookeeperQuorumSize: 3
+
+  ## Override the zookeeper quorum address. Zookeeper is used to determine which namenode
+  ## instance is active. Entries are separated by commas. Used only when
+ ## namenodeHAEnabled is set.
+ ##
+ # zookeeperQuorumOverride: zk-0.zk-svc.default.svc.cluster.local:2181,zk-1.zk-svc.default.svc.cluster.local:2181,zk-2.zk-svc.default.svc.cluster.local:2181
+
+ ## How many journal nodes to launch as a quorum. Used only when
+ ## namenodeHAEnabled is set.
+ journalnodeQuorumSize: 3
+
+ ## Whether or not to enable default affinity setting.
+ defaultAffinityEnabled: true
+
+ ## Whether or not Kerberos support is enabled.
+ kerberosEnabled: false
+
+  ## Effective only if Kerberos is enabled. Override the name of the k8s
+ ## ConfigMap containing the kerberos config file.
+ ##
+ # kerberosConfigMapOverride: kerberos-config
+
+ ## Effective only if Kerberos is enabled. Name of the kerberos config file inside
+ ## the config map.
+ kerberosConfigFileName: krb5.conf
+
+ ## Effective only if Kerberos is enabled. Override the name of the k8s Secret
+ ## containing the kerberos keytab files of per-host HDFS principals.
+ ## The secret should have multiple data items. Each data item name
+ ## should be formatted as:
+ ## `HOST-NAME.keytab`
+ ## where HOST-NAME should match the cluster node
+ ## host name that each per-host hdfs principal is associated with.
+ ##
+ # kerberosKeytabsSecretOverride: hdfs-kerberos-keytabs
+
+ ## Required to be non-empty if Kerberos is enabled. Specify your Kerberos realm name.
+ ## This should match the realm name in your Kerberos config file.
+ kerberosRealm: MYCOMPANY.COM
+
+ ## Effective only if Kerberos is enabled. Enable protection of datanodes using
+ ## the jsvc utility. See the reference doc at
+ ## https://hadoop.apache.org/docs/r2.7.2/hadoop-project-dist/hadoop-common/SecureMode.html#Secure_DataNode
+ jsvcEnabled: true
+
+## Tags and conditions for triggering a group of relevant subcharts.
+tags:
+ ## Trigger all subcharts required for high availability. Enabled by default.
+ ha: true
+
+ ## Trigger all subcharts required for using Kerberos. Disabled by default.
+ kerberos: false
+
+ ## Trigger all subcharts required for non-HA setup. Disabled by default.
+ simple: false
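
A hedged sketch of overriding a few of the values above from the command line; the chart path, release name, and storage class name are assumptions rather than part of this patch. Note that list values such as dataNodeHostPath must be wrapped in '{...}', as the comment above describes.

    helm install ./hdfs-k8s --name hdfs \
      --set "global.dataNodeHostPath={/hdfs-data,/hdfs-data1}" \
      --set hdfs-namenode-k8s.persistence.storageClass=standard \
      --set hdfs-journalnode-k8s.persistence.size=40Gi
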
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/.helmignore b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/Chart.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/Chart.yaml
new file mode 100644
index 00000000..f8c301f1
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+name: hdfs-krb5-k8s
+version: 0.1.0
+description: Kerberos server that can be used for HDFS on Kubernetes.
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/templates/statefulset.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/templates/statefulset.yaml
new file mode 100644
index 00000000..15be4b2f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-krb5-k8s/templates/statefulset.yaml
@@ -0,0 +1,99 @@
+# A headless service to create DNS records.
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "hdfs-k8s.krb5.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.krb5.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+ annotations:
+ # TODO: Deprecated. Replace tolerate-unready-endpoints with
+ # v1.Service.PublishNotReadyAddresses.
+ service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+ ports:
+ - port: {{ .Values.service.port }}
+ protocol: TCP
+ name: kdc-tcp
+ - port: {{ .Values.service.port }}
+ protocol: UDP
+ name: kdc-udp
+ clusterIP: None
+ selector:
+ app: {{ template "hdfs-k8s.krb5.name" . }}
+ release: {{ .Release.Name }}
+---
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+ name: {{ template "hdfs-k8s.krb5.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.krb5.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+spec:
+ serviceName: {{ template "hdfs-k8s.krb5.fullname" . }}
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ template "hdfs-k8s.krb5.name" . }}
+ release: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app: {{ template "hdfs-k8s.krb5.name" . }}
+ release: {{ .Release.Name }}
+ {{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: KRB5_REALM
+ value: {{ .Values.global.kerberosRealm }}
+ - name: KRB5_KDC
+ value: {{ template "krb5-svc-0" . }}
+ ports:
+ - name: kdc-tcp
+ containerPort: 88
+ protocol: TCP
+ - name: kdc-udp
+ containerPort: 88
+ protocol: UDP
+ livenessProbe:
+ tcpSocket:
+ port: kdc-tcp
+ readinessProbe:
+ tcpSocket:
+ port: kdc-tcp
+ restartPolicy: Always
+ {{- if .Values.global.podSecurityContext.enabled }}
+ securityContext:
+ runAsUser: {{ .Values.global.podSecurityContext.runAsUser }}
+ fsGroup: {{ .Values.global.podSecurityContext.fsGroup }}
+ {{- end }}
+ volumeClaimTemplates:
+ - metadata:
+ name: datadir
+ spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ {{- if .Values.persistence.storageClass }}
+ {{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+ {{- end }}
+ {{- end }}
+ {{- if .Values.persistence.selector }}
+ selector:
+{{ toYaml .Values.persistence.selector | indent 10 }}
+ {{- end }}
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-namenode-k8s/Chart.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-namenode-k8s/Chart.yaml
new file mode 100644
index 00000000..f45655f5
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-namenode-k8s/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+name: hdfs-namenode-k8s
+version: 0.1.0
+description: Namenodes for HDFS on Kubernetes.
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-namenode-k8s/templates/namenode-statefulset.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-namenode-k8s/templates/namenode-statefulset.yaml
new file mode 100644
index 00000000..44e8fc60
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-namenode-k8s/templates/namenode-statefulset.yaml
@@ -0,0 +1,287 @@
+# A headless service to create DNS records.
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "hdfs-k8s.namenode.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.namenode.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+ annotations:
+ # TODO: Deprecated. Replace tolerate-unready-endpoints with
+ # v1.Service.PublishNotReadyAddresses.
+ service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+ ports:
+ - port: 8020
+ name: fs
+ - port: 50070
+ name: http
+ clusterIP: None
+ selector:
+ app: {{ template "hdfs-k8s.namenode.name" . }}
+ release: {{ .Release.Name }}
+---
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ template "hdfs-k8s.namenode.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.namenode.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+spec:
+ selector:
+ matchLabels:
+ app: {{ template "hdfs-k8s.namenode.name" . }}
+ release: {{ .Release.Name }}
+ minAvailable: 1
+---
+# Provides namenode helper scripts. Most of them are start scripts
+# that meet different needs.
+# TODO: Support upgrade of metadata in case a new Hadoop version requires it.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "hdfs-k8s.namenode.fullname" . }}-scripts
+ labels:
+ app: {{ template "hdfs-k8s.namenode.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+data:
+  # A bootstrap script which starts the namenode daemons after conducting
+  # optional metadata initialization steps. The metadata initialization
+  # steps take place only when the metadata dir is empty, which is the
+  # case only for the very first run. The specific steps differ depending
+  # on whether the namenode is active or standby. We also assume that, for
+  # the very first run, namenode-0 will be active and namenode-1 will be
+  # standby, as the StatefulSet launches namenode-0 first and zookeeper
+  # will determine the sole namenode to be the active one.
+  # For the active namenode, the initialization steps format the metadata,
+  # the zookeeper dir, and the journal node data entries.
+  # For the standby namenode, the initialization steps simply receive
+  # the first batch of metadata updates from the journal node.
+ format-and-run.sh: |
+ #!/usr/bin/env bash
+ # Exit on error. Append "|| true" if you expect an error.
+ set -o errexit
+ # Exit on error inside any functions or subshells.
+ set -o errtrace
+ # Do not allow use of undefined vars. Use ${VAR:-} to use an undefined VAR
+ set -o nounset
+ # Catch an error in command pipes. e.g. mysqldump fails (but gzip succeeds)
+ # in `mysqldump |gzip`
+ set -o pipefail
+ # Turn on traces, useful while debugging.
+ set -o xtrace
+
+ _HDFS_BIN=$HADOOP_PREFIX/bin/hdfs
+ _METADATA_DIR=/hadoop/dfs/name/current
+ if [[ "$MY_POD" = "$NAMENODE_POD_0" ]]; then
+ if [[ ! -d $_METADATA_DIR ]]; then
+ $_HDFS_BIN --config $HADOOP_CONF_DIR namenode -format \
+ -nonInteractive hdfs-k8s ||
+ (rm -rf $_METADATA_DIR; exit 1)
+ fi
+ _ZKFC_FORMATTED=/hadoop/dfs/name/current/.hdfs-k8s-zkfc-formatted
+ if [[ ! -f $_ZKFC_FORMATTED ]]; then
+ _OUT=$($_HDFS_BIN --config $HADOOP_CONF_DIR zkfc -formatZK -nonInteractive 2>&1)
+ # zkfc masks fatal exceptions and returns exit code 0
+ (echo $_OUT | grep -q "FATAL") && exit 1
+ touch $_ZKFC_FORMATTED
+ fi
+ elif [[ "$MY_POD" = "$NAMENODE_POD_1" ]]; then
+ if [[ ! -d $_METADATA_DIR ]]; then
+ $_HDFS_BIN --config $HADOOP_CONF_DIR namenode -bootstrapStandby \
+ -nonInteractive || \
+ (rm -rf $_METADATA_DIR; exit 1)
+ fi
+ fi
+ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR start zkfc
+ $_HDFS_BIN --config $HADOOP_CONF_DIR namenode
+
+ # A start script that will just hang indefinitely. A user can then get
+  # inside the pod and debug. Or a user can conduct custom manual operations.
+ do-nothing.sh: |
+ #!/usr/bin/env bash
+ tail -f /var/log/dmesg
+
+  # A start script that has user-specified content. Can be used to conduct
+  # ad-hoc operations as specified by a user.
+ custom-run.sh: {{ .Values.customRunScript | quote }}
+---
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+ name: {{ template "hdfs-k8s.namenode.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.namenode.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+spec:
+ serviceName: {{ template "hdfs-k8s.namenode.fullname" . }}
+ replicas: 2
+ template:
+ metadata:
+ labels:
+ app: {{ template "hdfs-k8s.namenode.name" . }}
+ release: {{ .Release.Name }}
+ {{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.hostNetworkEnabled }}
+ # Use hostNetwork so datanodes connect to namenode without going through an overlay network
+      # like weave. Otherwise, the namenode fails to see the physical IP addresses of datanodes.
+      # Disabling this will break data locality, as the namenode will see pod virtual IPs and fail to
+      # equate them with the cluster node physical IPs associated with data nodes.
+ # We currently disable this only for CI on minikube.
+ hostNetwork: true
+ hostPID: true
+ dnsPolicy: ClusterFirstWithHostNet
+ {{- else }}
+ dnsPolicy: ClusterFirst
+ {{- end }}
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- else if .Values.global.defaultAffinityEnabled }}
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: "app"
+ operator: In
+ values:
+ - {{ template "hdfs-k8s.namenode.name" . }}
+ - key: "release"
+ operator: In
+ values:
+ - {{ .Release.Name }}
+ topologyKey: "kubernetes.io/hostname"
+ {{- end }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+ {{- end }}
+ containers:
+ # TODO: Support hadoop version as option.
+ - name: hdfs-namenode
+ image: uhopper/hadoop-namenode:2.7.2
+ env:
+ - name: HADOOP_CUSTOM_CONF_DIR
+ value: /etc/hadoop-custom-conf
+ - name: MULTIHOMED_NETWORK
+ value: "0"
+ # Used by the start script below.
+ - name: MY_POD
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: NAMENODE_POD_0
+ value: {{ template "namenode-pod-0" . }}
+ - name: NAMENODE_POD_1
+ value: {{ template "namenode-pod-1" . }}
+ command: ['/bin/sh', '-c']
+ # The start script is provided by a config map.
+ args:
+ - /entrypoint.sh "/nn-scripts/{{ .Values.namenodeStartScript }}"
+ ports:
+ - containerPort: 8020
+ name: fs
+ - containerPort: 50070
+ name: http
+ volumeMounts:
+ - name: nn-scripts
+ mountPath: /nn-scripts
+ readOnly: true
+ # Mount a subpath of the volume so that the name subdir would be a
+ # brand new empty dir. This way, we won't get affected by existing
+ # files in the volume top dir.
+ - name: metadatadir
+ mountPath: /hadoop/dfs/name
+ subPath: name
+ - name: hdfs-config
+ mountPath: /etc/hadoop-custom-conf
+ readOnly: true
+ {{- if .Values.global.kerberosEnabled }}
+ - name: kerberos-config
+ mountPath: /etc/krb5.conf
+ subPath: {{ .Values.global.kerberosConfigFileName }}
+ readOnly: true
+ - name: kerberos-keytab-copy
+ mountPath: /etc/security/
+ readOnly: true
+ {{- end }}
+ {{- if .Values.global.kerberosEnabled }}
+ initContainers:
+ - name: copy-kerberos-keytab
+ image: busybox:1.27.1
+ command: ['sh', '-c']
+ args:
+ - cp /kerberos-keytabs/${MY_KERBEROS_NAME}*.keytab /kerberos-keytab-copy/hdfs.keytab
+ env:
+ - name: MY_KERBEROS_NAME
+ valueFrom:
+ fieldRef:
+ {{- if .Values.hostNetworkEnabled }}
+ fieldPath: spec.nodeName
+ {{- else }}
+ fieldPath: metadata.name
+ {{- end }}
+ volumeMounts:
+ - name: kerberos-keytabs
+ mountPath: /kerberos-keytabs
+ - name: kerberos-keytab-copy
+ mountPath: /kerberos-keytab-copy
+ {{- end }}
+ restartPolicy: Always
+ volumes:
+ - name: nn-scripts
+ configMap:
+ name: {{ template "hdfs-k8s.namenode.fullname" . }}-scripts
+ defaultMode: 0744
+ - name: hdfs-config
+ configMap:
+ name: {{ template "hdfs-k8s.config.fullname" . }}
+ {{- if .Values.global.kerberosEnabled }}
+ - name: kerberos-config
+ configMap:
+ name: {{ template "krb5-configmap" . }}
+ - name: kerberos-keytabs
+ secret:
+ secretName: {{ template "krb5-keytabs-secret" . }}
+ - name: kerberos-keytab-copy
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.global.podSecurityContext.enabled }}
+ securityContext:
+ runAsUser: {{ .Values.global.podSecurityContext.runAsUser }}
+ fsGroup: {{ .Values.global.podSecurityContext.fsGroup }}
+ {{- end }}
+ volumeClaimTemplates:
+ - metadata:
+ name: metadatadir
+ spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ {{- if .Values.persistence.storageClass }}
+ {{- if (eq "-" .Values.persistence.storageClass) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: "{{ .Values.persistence.storageClass }}"
+ {{- end }}
+ {{- end }}
+ {{- if .Values.persistence.selector }}
+ selector:
+{{ toYaml .Values.persistence.selector | indent 10 }}
+ {{- end }}
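
For debugging, the scripts ConfigMap above also ships do-nothing.sh, which keeps the namenode container alive without starting any daemon. A hedged sketch of switching an existing release to it and back; the release name and chart path are assumptions.

    helm upgrade hdfs ./hdfs-k8s --reuse-values \
      --set hdfs-namenode-k8s.namenodeStartScript=do-nothing.sh
    # Revert to the default bootstrap script once done:
    helm upgrade hdfs ./hdfs-k8s --reuse-values \
      --set hdfs-namenode-k8s.namenodeStartScript=format-and-run.sh
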
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-simple-namenode-k8s/Chart.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-simple-namenode-k8s/Chart.yaml
new file mode 100644
index 00000000..bcf6f5b0
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-simple-namenode-k8s/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+name: hdfs-simple-namenode-k8s
+version: 0.1.0
+description: Non-HA namenode for HDFS on Kubernetes.
diff --git a/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-simple-namenode-k8s/templates/namenode-statefulset.yaml b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-simple-namenode-k8s/templates/namenode-statefulset.yaml
new file mode 100644
index 00000000..ab92efa9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/kubernetes-HDFS/charts/hdfs-simple-namenode-k8s/templates/namenode-statefulset.yaml
@@ -0,0 +1,82 @@
+# A headless service to create DNS records.
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "hdfs-k8s.namenode.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.namenode.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+spec:
+ ports:
+ - port: 8020
+ name: fs
+ clusterIP: None
+ selector:
+ app: {{ template "hdfs-k8s.namenode.name" . }}
+ release: {{ .Release.Name }}
+---
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+ name: {{ template "hdfs-k8s.namenode.fullname" . }}
+ labels:
+ app: {{ template "hdfs-k8s.namenode.name" . }}
+ chart: {{ template "hdfs-k8s.subchart" . }}
+ release: {{ .Release.Name }}
+spec:
+ serviceName: {{ template "hdfs-k8s.namenode.fullname" . }}
+ # Create a size-1 set.
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: {{ template "hdfs-k8s.namenode.name" . }}
+ release: {{ .Release.Name }}
+ {{- if .Values.podAnnotations }}
+ annotations:
+{{ toYaml .Values.podAnnotations | indent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+ {{- end }}
+ # Use hostNetwork so datanodes connect to namenode without going through an overlay network
+      # like weave. Otherwise, the namenode fails to see the physical IP addresses of datanodes.
+ hostNetwork: true
+ hostPID: true
+ dnsPolicy: ClusterFirstWithHostNet
+ containers:
+ - name: hdfs-namenode
+ image: uhopper/hadoop-namenode:2.7.2
+ env:
+ - name: HADOOP_CUSTOM_CONF_DIR
+ value: /etc/hadoop-custom-conf
+ - name: CLUSTER_NAME
+ value: hdfs-k8s
+ ports:
+ - containerPort: 8020
+ name: fs
+ volumeMounts:
+ - name: hdfs-name
+ mountPath: /hadoop/dfs/name
+ - name: hdfs-config
+ mountPath: /etc/hadoop-custom-conf
+ readOnly: true
+ restartPolicy: Always
+ volumes:
+ - name: hdfs-name
+ hostPath:
+ path: {{ .Values.nameNodeHostPath }}
+ - name: hdfs-config
+ configMap:
+ name: {{ template "hdfs-k8s.config.fullname" . }}
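
A hedged sketch of selecting this non-HA variant instead of the HA charts; the tag and value names come from requirements.yaml and values.yaml above, while the release name and chart path are assumptions.

    helm install ./hdfs-k8s --name hdfs-simple \
      --set tags.ha=false \
      --set tags.simple=true \
      --set global.namenodeHAEnabled=false \
      --set hdfs-simple-namenode-k8s.nameNodeHostPath=/hdfs-name
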
diff --git a/vnfs/DAaaS/deploy/training-core/charts/m3db/.helmignore b/vnfs/DAaaS/deploy/training-core/charts/m3db/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/m3db/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/deploy/training-core/charts/m3db/Chart.yaml b/vnfs/DAaaS/deploy/training-core/charts/m3db/Chart.yaml
new file mode 100644
index 00000000..10d9d542
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/m3db/Chart.yaml
@@ -0,0 +1,3 @@
+apiVersion: v1
+name: m3db
+version: 0.1.1
diff --git a/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/NOTES.txt b/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/NOTES.txt
new file mode 100644
index 00000000..ee7ee3d7
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/NOTES.txt
@@ -0,0 +1 @@
+M3DB Cluster {{ .Values.m3dbCluster.name }} has been created \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/_helpers.tpl b/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/_helpers.tpl
new file mode 100644
index 00000000..36544b12
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/_helpers.tpl
@@ -0,0 +1,32 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "m3db.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "m3db.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "m3db.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/configmap.yaml b/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/configmap.yaml
new file mode 100644
index 00000000..d7197ae9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/configmap.yaml
@@ -0,0 +1,216 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.m3dbCluster.configMapName }}
+data:
+ m3.yml: |+
+ coordinator:
+ listenAddress:
+ type: "config"
+ value: "0.0.0.0:7201"
+ metrics:
+ scope:
+ prefix: "coordinator"
+ prometheus:
+ handlerPath: /metrics
+ listenAddress: 0.0.0.0:7203
+ sanitization: prometheus
+ samplingRate: 1.0
+ extended: none
+ tagOptions:
+ idScheme: quoted
+ local:
+ namespaces:
+ - namespace: "collectd"
+ type: unaggregated
+ retention: 48h
+ db:
+ logging:
+ level: info
+
+ metrics:
+ prometheus:
+ handlerPath: /metrics
+ sanitization: prometheus
+ samplingRate: 1.0
+ extended: detailed
+
+ listenAddress: 0.0.0.0:9000
+ clusterListenAddress: 0.0.0.0:9001
+ httpNodeListenAddress: 0.0.0.0:9002
+ httpClusterListenAddress: 0.0.0.0:9003
+ debugListenAddress: 0.0.0.0:9004
+
+ hostID:
+ resolver: file
+ file:
+ path: /etc/m3db/pod-identity/identity
+ timeout: 5m
+
+ client:
+ writeConsistencyLevel: majority
+ readConsistencyLevel: unstrict_majority
+ writeTimeout: 10s
+ fetchTimeout: 15s
+ connectTimeout: 20s
+ writeRetry:
+ initialBackoff: 500ms
+ backoffFactor: 3
+ maxRetries: 2
+ jitter: true
+ fetchRetry:
+ initialBackoff: 500ms
+ backoffFactor: 2
+ maxRetries: 3
+ jitter: true
+ backgroundHealthCheckFailLimit: 4
+ backgroundHealthCheckFailThrottleFactor: 0.5
+
+ gcPercentage: 100
+
+ writeNewSeriesAsync: true
+ writeNewSeriesLimitPerSecond: 1048576
+ writeNewSeriesBackoffDuration: 2ms
+
+ bootstrap:
+ bootstrappers:
+ - filesystem
+ - commitlog
+ - peers
+ - uninitialized_topology
+ fs:
+ numProcessorsPerCPU: 0.125
+
+ commitlog:
+ flushMaxBytes: 524288
+ flushEvery: 1s
+ queue:
+ calculationType: fixed
+ size: 2097152
+ blockSize: 10m
+
+ fs:
+ filePathPrefix: /var/lib/m3db
+ writeBufferSize: 65536
+ dataReadBufferSize: 65536
+ infoReadBufferSize: 128
+ seekReadBufferSize: 4096
+ throughputLimitMbps: 100.0
+ throughputCheckEvery: 128
+
+ repair:
+ enabled: false
+ interval: 2h
+ offset: 30m
+ jitter: 1h
+ throttle: 2m
+ checkInterval: 1m
+
+ pooling:
+ blockAllocSize: 16
+ type: simple
+ seriesPool:
+ size: 262144
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ blockPool:
+ size: 262144
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ encoderPool:
+ size: 262144
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ closersPool:
+ size: 104857
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ contextPool:
+ size: 262144
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ segmentReaderPool:
+ size: 16384
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ iteratorPool:
+ size: 2048
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ fetchBlockMetadataResultsPool:
+ size: 65536
+ capacity: 32
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ fetchBlocksMetadataResultsPool:
+ size: 32
+ capacity: 4096
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ hostBlockMetadataSlicePool:
+ size: 131072
+ capacity: 3
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ blockMetadataPool:
+ size: 65536
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ blockMetadataSlicePool:
+ size: 65536
+ capacity: 32
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ blocksMetadataPool:
+ size: 65536
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ blocksMetadataSlicePool:
+ size: 32
+ capacity: 4096
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ identifierPool:
+ size: 262144
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ bytesPool:
+ buckets:
+ - capacity: 16
+ size: 524288
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ - capacity: 32
+ size: 262144
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ - capacity: 64
+ size: 131072
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ - capacity: 128
+ size: 65536
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ - capacity: 256
+ size: 65536
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ - capacity: 1440
+ size: 16384
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ - capacity: 4096
+ size: 8192
+ lowWatermark: 0.7
+ highWatermark: 1.0
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - http://{{ .Release.Name }}-{{ .Values.etcdCluster.name }}:2379
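
For illustration only: the etcd endpoint above is assembled from the release name and etcdCluster.name, so the coordinator and DB nodes all point at the operator-managed etcd service of the same release. Assuming a release named "m3db" and the default etcdCluster.name of "etcd" from values.yaml:

    # Hypothetical rendered endpoint:
    #   http://m3db-etcd:2379
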
diff --git a/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/etcd-cluster.yaml b/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/etcd-cluster.yaml
new file mode 100644
index 00000000..fcf44256
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/etcd-cluster.yaml
@@ -0,0 +1,20 @@
+apiVersion: "etcd.database.coreos.com/v1beta2"
+kind: "EtcdCluster"
+metadata:
+ name: {{ .Release.Name }}-{{ .Values.etcdCluster.name }}
+ labels:
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: {{ template "m3db.name" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ annotations:
+ etcd.database.coreos.com/scope: clusterwide
+spec:
+ size: {{ .Values.etcdCluster.size }}
+ version: "{{ .Values.etcdCluster.version }}"
+ pod:
+{{ toYaml .Values.etcdCluster.pod | indent 4 }}
+ {{- if .Values.etcdCluster.enableTLS }}
+ TLS:
+{{ toYaml .Values.etcdCluster.tls | indent 4 }}
+ {{- end }}
diff --git a/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/m3dbcluster.yaml b/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/m3dbcluster.yaml
new file mode 100644
index 00000000..5e804351
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/m3db/templates/m3dbcluster.yaml
@@ -0,0 +1,22 @@
+apiVersion: operator.m3db.io/v1alpha1
+kind: M3DBCluster
+metadata:
+ name: {{ .Values.m3dbCluster.name }}
+spec:
+ image: {{ .Values.m3dbCluster.image.repository }}:{{ .Values.m3dbCluster.image.tag }}
+ replicationFactor: {{ .Values.m3dbCluster.replicationFactor }}
+ numberOfShards: {{ .Values.m3dbCluster.numberOfShards }}
+ isolationGroups:
+{{ toYaml .Values.m3dbCluster.isolationGroups | indent 4 }}
+ namespaces:
+{{ toYaml .Values.m3dbCluster.namespaces | indent 4 }}
+ configMapName: {{ .Values.m3dbCluster.configMapName }}
+ resources:
+ requests:
+ memory: 4Gi
+ cpu: '1'
+ limits:
+ memory: 12Gi
+ cpu: '4'
+
+
diff --git a/vnfs/DAaaS/deploy/training-core/charts/m3db/values.yaml b/vnfs/DAaaS/deploy/training-core/charts/m3db/values.yaml
new file mode 100644
index 00000000..ab365cfa
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/charts/m3db/values.yaml
@@ -0,0 +1,51 @@
+m3dbCluster:
+ name: m3db-cluster
+ image:
+ repository: quay.io/m3db/m3dbnode
+ tag: latest
+ replicationFactor: 3
+ numberOfShards: 256
+ isolationGroups:
+ - name: us-west1-a
+ numInstances: 1
+ - name: us-west1-b
+ numInstances: 1
+ - name: us-west1-c
+ numInstances: 1
+ namespaces:
+ - name: collectd
+ preset: 10s:2d
+ configMapName: m3-configuration
+
+etcdCluster:
+ name: etcd
+ size: 3
+ version: 3.3.3
+ image:
+ repository: quay.io/coreos/etcd
+ tag: v3.3.3
+ pullPolicy: Always
+ enableTLS: false
+ # TLS configs
+ tls:
+ static:
+ member:
+ peerSecret: etcd-peer-tls
+ serverSecret: etcd-server-tls
+ operatorSecret: etcd-client-tls
+ ## etcd cluster pod specific values
+ ## Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-members-cluster-with-resource-requirement
+ pod:
+ ## Antiaffinity for etcd pod assignment
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ antiAffinity: false
+ resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ ## Node labels for etcd pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ nodeSelector: {}
diff --git a/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/README.md b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/README.md
new file mode 100644
index 00000000..4de7d0f9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/README.md
@@ -0,0 +1,11 @@
+# HDFS-writer
+
+The HDFS writer reads messages from a Kafka topic and persists them in the
+given HDFS file system. This is a work in progress and shall be moved
+to a separate source code repo later.
+
+## Usage
+
+## Config items
+
+## Troubleshooting \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/pom.xml b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/pom.xml
new file mode 100644
index 00000000..20c11fea
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/pom.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>com.intel.onap</groupId>
+ <artifactId>hdfs-writer</artifactId>
+ <version>1.0</version>
+
+ <!--Begin: compile and build the fat jar -->
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.8.1</version>
+ <configuration>
+ <source>1.8</source>
+ <target>1.8</target>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ <version>2.3</version>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ <configuration>
+ <transformers>
+ <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
+ </transformers>
+ <filters>
+ <filter>
+ <artifact>*:*</artifact>
+ <excludes>
+ <exclude>META-INF/*.SF</exclude>
+ <exclude>META-INF/*.DSA</exclude>
+ <exclude>META-INF/*.RSA</exclude>
+ </excludes>
+ </filter>
+ </filters>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <configuration>
+ <archive>
+ <manifest>
+ <mainClass>kafka2hdfsApp</mainClass>
+ </manifest>
+ </archive>
+ <descriptorRefs>
+ <descriptorRef>jar-with-dependencies</descriptorRef>
+ </descriptorRefs>
+ </configuration>
+ <executions>
+ <execution>
+ <id>make-assembly</id> <!-- this is used for inheritance merges -->
+ <phase>package</phase> <!-- bind to the packaging phase -->
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ <!--End: compile and build the fat jar -->
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-core</artifactId>
+ <version>1.2.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-client</artifactId>
+ <version>3.2.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-hdfs</artifactId>
+ <version>2.7.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.kafka</groupId>
+ <artifactId>kafka-clients</artifactId>
+ <version>2.2.0</version>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.dataformat</groupId>
+ <artifactId>jackson-dataformat-yaml</artifactId>
+ <version>2.9.8</version>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-databind</artifactId>
+ <version>2.2.3</version>
+ </dependency>
+
+ </dependencies>
+
+</project> \ No newline at end of file
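
A minimal build-and-run sketch for the writer, assuming Maven and a Java 8 JDK on the PATH; the exact artifact file name is an assumption derived from the artifactId, version, and assembly descriptor above, and the manifest main class is kafka2hdfsApp as declared in the assembly plugin.

    # Both the shade and assembly plugins are bound to the package phase.
    mvn clean package
    # Run the assembly artifact, whose manifest names kafka2hdfsApp as the main class.
    java -jar target/hdfs-writer-1.0-jar-with-dependencies.jar
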
diff --git a/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/CreateKafkaConsumer.java b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/CreateKafkaConsumer.java
new file mode 100644
index 00000000..2042a146
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/CreateKafkaConsumer.java
@@ -0,0 +1,81 @@
+import config.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.*;
+
+public class CreateKafkaConsumer {
+
+
+ private static Logger log = LoggerFactory.getLogger(CreateKafkaConsumer.class);
+
+ private final String BOOTSTRAP_SERVERS = (String) Configuration.getSettings().get("kafka").get("bootStrapServers");
+ private final String GROUP_ID_CONFIG = (String) Configuration.getSettings().get("kafka").get("group_id");
+ private final String KEY_DESERIALIZER = (String) Configuration.getSettings().get("kafka").get("key_deserialize_class");
+ private final String VAL_DESERIALIZER = (String) Configuration.getSettings().get("kafka").get("value_deserialize_class");
+ private final String KAFKA_TOPIC = (String) Configuration.getSettings().get("kafka").get("topic");
+
+ private final String HDFS_URL= (String) Configuration.getSettings().get("hdfs").get("hdfsURL");
+ private final String HDFS_REMOTE_FILE = (String) Configuration.getSettings().get("hdfs").get("hdfs_remote_file");
+
+ private KafkaConsumer<String, String> kafkaConsumer;
+ private Properties properties = new Properties();
+ private HdfsWriter hdfsWriter;
+ private FileSystem hdfsFileSystem;
+
+
+
+ public CreateKafkaConsumer() throws IOException{
+ setKafkaProperties();
+ kafkaConsumer = new KafkaConsumer<>(properties);
+ kafkaConsumer.subscribe(Collections.singletonList(KAFKA_TOPIC));
+ hdfsWriter = new HdfsWriter();
+ hdfsFileSystem = hdfsWriter.createHdfsFileSystem(HDFS_URL);
+ log.info(":::Created kafkaConsumer:::");
+ }
+
+ private void setKafkaProperties(){
+
+ properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
+ properties.put(ConsumerConfig.GROUP_ID_CONFIG, GROUP_ID_CONFIG);
+ properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, KEY_DESERIALIZER);
+ properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, VAL_DESERIALIZER);
+ log.info(":::Set kafka properties:::");
+ }
+
+
+ public void processKafkaMessage() throws IOException{
+ try{
+ while(true){
+                // poll(long) is deprecated in kafka-clients 2.x; use the Duration overload instead
+                ConsumerRecords<String, String> recordsPerPartition = kafkaConsumer.poll(Duration.ofMillis(100000));
+                if(recordsPerPartition.isEmpty())
+                    log.info(":::recordsPerPartition is empty:::");
+ else
+ log.info(":::size of recordsPerPartition: "+recordsPerPartition.count()+" :::");
+
+ for(ConsumerRecord<String, String> record:recordsPerPartition){
+ log.info("Topic: "+record.topic());
+ log.info("partition: "+record.partition());
+ log.info("ReceivedKey: "+record.key()+" ReceivedValue: "+record.value());
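+                    // A new HDFS output stream is opened (create or append) and closed for every record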
+ FSDataOutputStream fsDataOutputStream = hdfsWriter.invokeHdfsWriter(hdfsFileSystem, HDFS_REMOTE_FILE);
+ hdfsWriter.writeMessageToHdfs(fsDataOutputStream, record.value());
+ fsDataOutputStream.close();
+ }
+
+ }
+ }
+
+ finally {
+ log.info(":::Closing kafkaConsumer:::");
+ kafkaConsumer.close();
+ }
+ }
+}
diff --git a/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/HdfsWriter.java b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/HdfsWriter.java
new file mode 100644
index 00000000..cd5b6635
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/HdfsWriter.java
@@ -0,0 +1,40 @@
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URI;
+
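+/**
+ * Small helper around the Hadoop FileSystem API: opens an HDFS file (create or append) and writes messages to it.
+ */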
+public class HdfsWriter {
+
+    private static Logger log = LoggerFactory.getLogger(HdfsWriter.class);
+
+
+ public FileSystem createHdfsFileSystem(String hdfsDestination) throws IOException {
+ Configuration hdfsConfiguration = new Configuration();
+ FileSystem hdfsFileSystem = FileSystem.get(URI.create(hdfsDestination), hdfsConfiguration);
+ log.info(":::Created hdfsFileSystem:::");
+ return hdfsFileSystem;
+ }
+
+
+ public void writeMessageToHdfs(FSDataOutputStream fsDataOutputStream, String bytesFromKafka) throws IOException {
+ fsDataOutputStream.writeBytes(bytesFromKafka);
+ log.info(":::Wrote to HDFS:::");
+ }
+
+
+ public FSDataOutputStream invokeHdfsWriter(FileSystem hdfsFileSystem, String hdfsFile) throws IOException {
+ FSDataOutputStream fsDataOutputStream;
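+        // Create the file on the first write; append on subsequent writes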
+ if(!hdfsFileSystem.exists(new Path("/"+hdfsFile)))
+ fsDataOutputStream = hdfsFileSystem.create(new Path("/"+hdfsFile));
+ else
+ fsDataOutputStream = hdfsFileSystem.append(new Path("/"+hdfsFile));
+ log.info(":::HDFSWriter invoked:::");
+ return fsDataOutputStream;
+ }
+
+}
diff --git a/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/Orchestrator.java b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/Orchestrator.java
new file mode 100644
index 00000000..b4daf2d1
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/Orchestrator.java
@@ -0,0 +1,51 @@
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import config.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.InputStream;
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Map;
+
+
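+/**
+ * Reads the YAML config file from the classpath and loads it into the shared Configuration settings.
+ */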
+public class Orchestrator {
+
+ private static Logger logger = LoggerFactory.getLogger(Orchestrator.class);
+
+ public void init(String configYamlFile){
+
+ parseConfigYaml(configYamlFile);
+ }
+
+ private void parseConfigYaml(String configYaml) {
+
+ URL fileUrl = getClass().getResource(configYaml);
+        if(fileUrl==null)
+            logger.error("::: Config file missing: " + configYaml + " :::");
+
+ else{
+ Configuration conf = new Configuration();
+ ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
+ String realConfigYaml = configYaml;
+
+ if (!realConfigYaml.startsWith("/")) {
+ realConfigYaml = "/" + configYaml;
+ }
+ Map<String, Object> configs;
+ try (InputStream is = getClass().getResourceAsStream(realConfigYaml)) {
+ TypeReference<HashMap<String, Object>> typeRef
+ = new TypeReference<HashMap<String, Object>>() {
+ };
+ configs = mapper.readValue(is, typeRef);
+ conf.init(configs);
+
+ } catch (Exception e) {
+                logger.error("Failed to parse config yaml " + configYaml, e);
+ }
+ }
+ }
+}
+
diff --git a/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/config/Configuration.java b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/config/Configuration.java
new file mode 100644
index 00000000..c7de131b
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/config/Configuration.java
@@ -0,0 +1,38 @@
+package config;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
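+/**
+ * Holds the parsed YAML configuration as a map of top-level sections (e.g. "kafka", "hdfs") to their key/value pairs.
+ */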
+public class Configuration{
+
+ private static Logger log = LoggerFactory.getLogger(Configuration.class);
+ private static Map<String, Map<String, Object>> settings;
+
+ public void init(Map<String, Object> yamlConfigs){
+ settings = new HashMap<>();
+
+ if(yamlConfigs!=null){
+            for(Map.Entry<String, Object> entry : yamlConfigs.entrySet()){
+                Object value = entry.getValue();
+                // Keep only the nested sections (e.g. "kafka", "hdfs") as settings
+                if(value instanceof Map){
+                    @SuppressWarnings("unchecked")
+                    Map<String, Object> valueMap = (Map<String, Object>) value;
+                    settings.put(entry.getKey(), valueMap);
+                }
+            }
+ }
+ log.info(":::Settings initiated :::");
+ }
+
+ public static Map<String, Map<String, Object>> getSettings() {
+ return settings;
+ }
+} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/kafka2hdfsApp.java b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/kafka2hdfsApp.java
new file mode 100644
index 00000000..5c041134
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/java/kafka2hdfsApp.java
@@ -0,0 +1,14 @@
+import java.io.IOException;
+
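+/**
+ * Entry point: initializes the configuration via Orchestrator and then runs the Kafka-to-HDFS consumer loop.
+ */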
+public class kafka2hdfsApp {
+
+ public static void main(String[] args) throws IOException {
+ System.out.println("Begin::: kafka2hdfsApp");
+ Orchestrator orchestrator = new Orchestrator();
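+        // The config YAML path is expected as the second command-line argument (args[1])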
+ orchestrator.init(args[1]);
+
+ CreateKafkaConsumer createKafkaConsumer = new CreateKafkaConsumer();
+ createKafkaConsumer.processKafkaMessage();
+
+ }
+}
diff --git a/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/resources/configs.yaml b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/resources/configs.yaml
new file mode 100644
index 00000000..8955c304
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/hdfs-writer-source-code/hdfs-writer/src/main/resources/configs.yaml
@@ -0,0 +1,10 @@
+kafka:
+ bootStrapServers:
+ group_id:
+ key_deserialize_class:
+ value_deserialize_class:
+ topic:
+
+hdfs:
+ hdfsURL:
+ hdfs_remote_file:
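+
+# Illustrative example only (all values below are hypothetical placeholders):
+#
+# kafka:
+#   bootStrapServers: "my-kafka-bootstrap:9092"
+#   group_id: "hdfs-writer"
+#   key_deserialize_class: "org.apache.kafka.common.serialization.StringDeserializer"
+#   value_deserialize_class: "org.apache.kafka.common.serialization.StringDeserializer"
+#   topic: "my-topic"
+#
+# hdfs:
+#   hdfsURL: "hdfs://hdfs-namenode:9000"
+#   hdfs_remote_file: "kafka-dump.txt"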
diff --git a/vnfs/DAaaS/deploy/training-core/values.yaml b/vnfs/DAaaS/deploy/training-core/values.yaml
new file mode 100644
index 00000000..fd98eb36
--- /dev/null
+++ b/vnfs/DAaaS/deploy/training-core/values.yaml
@@ -0,0 +1,29 @@
+# Copyright © 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+ nodePortPrefix: 310
+ repository: nexus3.onap.org:10001
+ readinessRepository: oomk8s
+ readinessImage: readiness-check:2.0.0
+ loggingRepository: docker.elastic.co
+ loggingImage: beats/filebeat:5.5.0
+
+#################################################################
+# k8s Operator Day-0 configuration defaults.
+#################################################################
+
diff --git a/vnfs/DAaaS/deploy/visualization/.helmignore b/vnfs/DAaaS/deploy/visualization/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/deploy/visualization/Chart.yaml b/vnfs/DAaaS/deploy/visualization/Chart.yaml
new file mode 100644
index 00000000..b85467b4
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: visualization
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/.helmignore b/vnfs/DAaaS/deploy/visualization/charts/grafana/.helmignore
new file mode 100755
index 00000000..7c04072e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+OWNERS
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/Chart.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/Chart.yaml
new file mode 100755
index 00000000..a0721868
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/Chart.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+appVersion: 6.0.2
+description: The leading tool for querying and visualizing time series and metrics.
+engine: gotpl
+home: https://grafana.net
+icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
+kubeVersion: ^1.8.0-0
+maintainers:
+- email: zanhsieh@gmail.com
+ name: zanhsieh
+- email: rluckie@cisco.com
+ name: rtluckie
+- email: maorfr@gmail.com
+ name: maorfr
+name: grafana
+sources:
+- https://github.com/grafana/grafana
+version: 2.3.3
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/README.md b/vnfs/DAaaS/deploy/visualization/charts/grafana/README.md
new file mode 100755
index 00000000..44a30601
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/README.md
@@ -0,0 +1,240 @@
+# Grafana Helm Chart
+
+* Installs the web dashboarding system [Grafana](http://grafana.org/)
+
+## TL;DR;
+
+```console
+$ helm install stable/grafana
+```
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm install --name my-release stable/grafana
+```
+
+## Uninstalling the Chart
+
+To uninstall/delete the my-release deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+
+## Configuration
+
+| Parameter | Description | Default |
+|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
+| `replicas` | Number of nodes | `1` |
+| `deploymentStrategy` | Deployment strategy | `RollingUpdate` |
+| `livenessProbe`                           | Liveness Probe settings                        | `{ "httpGet": { "path": "/api/health", "port": 3000 }, "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` |
+| `readinessProbe`                          | Readiness Probe settings                       | `{ "httpGet": { "path": "/api/health", "port": 3000 } }` |
+| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "fsGroup": 472}` |
+| `priorityClassName` | Name of Priority Class to assign pods | `nil` |
+| `image.repository` | Image repository | `grafana/grafana` |
+| `image.tag` | Image tag. (`Must be >= 5.0.0`) | `6.0.2` |
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `service.type` | Kubernetes service type | `ClusterIP` |
+| `service.port` | Kubernetes port where service is exposed | `80` |
+| `service.targetPort`                      | Internal port that the service targets (container port) | `3000` |
+| `service.annotations` | Service annotations | `{}` |
+| `service.labels` | Custom labels | `{}` |
+| `ingress.enabled` | Enables Ingress | `false` |
+| `ingress.annotations` | Ingress annotations | `{}` |
+| `ingress.labels` | Custom labels | `{}` |
+| `ingress.hosts` | Ingress accepted hostnames | `[]` |
+| `ingress.tls` | Ingress TLS configuration | `[]` |
+| `resources` | CPU/Memory resource requests/limits | `{}` |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `tolerations` | Toleration labels for pod assignment | `[]` |
+| `affinity` | Affinity settings for pod assignment | `{}` |
+| `extraInitContainers` | Init containers to add to the grafana pod | `{}` |
+| `extraContainers` | Sidecar containers to add to the grafana pod | `{}` |
+| `persistence.enabled` | Use persistent volume to store data | `false` |
+| `persistence.initChownData` | Change ownership of persistent volume on initialization | `true` |
+| `persistence.size` | Size of persistent volume claim | `10Gi` |
+| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
+| `persistence.storageClassName` | Type of persistent volume claim | `nil` |
+| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` |
+| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` |
+| `schedulerName` | Alternate scheduler name | `nil` |
+| `env` | Extra environment variables passed to pods | `{}` |
+| `envFromSecret`                           | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment | `""` |
+| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
+| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` |
+| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` |
+| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` |
+| `plugins` | Plugins to be loaded along with Grafana | `[]` |
+| `datasources` | Configure grafana datasources (passed through tpl) | `{}` |
+| `notifiers` | Configure grafana notifiers | `{}` |
+| `dashboardProviders` | Configure grafana dashboard providers | `{}` |
+| `dashboards` | Dashboards to import | `{}` |
+| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` |
+| `grafana.ini` | Grafana's primary configuration | `{}` |
+| `ldap.existingSecret`                     | The name of an existing secret containing the `ldap.toml` file; this must have the key `ldap-toml`. | `""` |
+| `ldap.config`                             | Grafana's LDAP configuration                   | `""` |
+| `annotations` | Deployment annotations | `{}` |
+| `podAnnotations` | Pod annotations | `{}` |
+| `sidecar.image` | Sidecar image | `kiwigrid/k8s-sidecar:0.0.13` |
+| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` |
+| `sidecar.resources` | Sidecar resources | `{}` |
+| `sidecar.dashboards.enabled`              | Enables the cluster-wide search for dashboards and adds/updates/deletes them in Grafana | `false` |
+| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` |
+| `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
+| `sidecar.datasources.enabled`             | Enables the cluster-wide search for datasources and adds/updates/deletes them in Grafana | `false` |
+| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
+| `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
+| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
+| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
+| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
+| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` |
+| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` |
+| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` |
+| `rbac.create` | Create and use RBAC resources | `true` |
+| `rbac.namespaced`                         | Creates Role and RoleBinding instead of the default ClusterRole and ClusterRoleBinding for the Grafana instance | `false` |
+| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` |
+| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` |
+| `command` | Define command to be executed by grafana container at startup | `nil` |
+
+### Example of extraVolumeMounts
+
+```yaml
+extraVolumeMounts:
+ - name: plugins
+ mountPath: /var/lib/grafana/plugins
+ subPath: configs/grafana/plugins
+ existingClaim: existing-grafana-claim
+ readOnly: false
+```
+
+## Import dashboards
+
+There are a few methods to import dashboards into Grafana. Below are some examples of how to use each method:
+
+```yaml
+dashboards:
+ default:
+ some-dashboard:
+ json: |
+ {
+ "annotations":
+
+ ...
+ # Complete json file here
+ ...
+
+ "title": "Some Dashboard",
+ "uid": "abcd1234",
+ "version": 1
+ }
+ custom-dashboard:
+ # This is a path to a file inside the dashboards directory inside the chart directory
+ file: dashboards/custom-dashboard.json
+ prometheus-stats:
+ # Ref: https://grafana.com/dashboards/2
+ gnetId: 2
+ revision: 2
+ datasource: Prometheus
+ local-dashboard:
+ url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
+```
+
+## BASE64 dashboards
+
+Dashboards may be stored on a server that does not return JSON directly but instead returns a Base64 encoded file (e.g. Gerrit).
+A new parameter has been added to the `url` use case: if you set `b64content` to `true` next to the `url` entry, the content is Base64 decoded before being saved to disk.
+If this entry is not set, or is set to `false`, no decoding is applied to the file before saving it to disk.
+
+### Gerrit use case:
+The Gerrit API for downloading files has the following schema: https://yourgerritserver/a/{project-name}/branches/{branch-id}/files/{file-id}/content, where {project-name} and
+{file-id} usually contain '/' in their values, so those characters MUST be replaced by %2F. For example, if project-name is user/repo, branch-id is master and file-id is dir1/dir2/dashboard,
+the url value is https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content
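+
+For example, a dashboard entry that downloads such a Base64 encoded file (reusing the hypothetical Gerrit URL above, with an illustrative dashboard name) could look like:
+
+```yaml
+dashboards:
+  default:
+    gerrit-dashboard:
+      url: https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content
+      b64content: true
+```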
+
+## Sidecar for dashboards
+
+If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana pod. This container watches all config maps in the cluster and filters out the ones with a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported dashboards are deleted/updated. A recommendation is to use one configmap per dashboard, as removing a dashboard from a configmap that contains several is currently not properly mirrored in grafana.
+Example dashboard config:
+```
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: sample-grafana-dashboard
+ labels:
+    grafana_dashboard: "1"
+data:
+ k8s-dashboard.json: |-
+ [...]
+```
+
+## Sidecar for datasources
+
+If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana pod. This container lists all config maps in the cluster and filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in those configmaps are written to a folder and accessed by grafana on startup. Using these yaml files, the data sources in grafana can be imported. The configmaps must be created before `helm install` so that the datasources init container can list the configmaps.
+
+Example datasource config adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file):
+```
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: sample-grafana-datasource
+ labels:
+    grafana_datasource: "1"
+data:
+ datasource.yaml: |-
+ # config file version
+ apiVersion: 1
+
+ # list of datasources that should be deleted from the database
+ deleteDatasources:
+ - name: Graphite
+ orgId: 1
+
+ # list of datasources to insert/update depending
+ # whats available in the database
+ datasources:
+ # <string, required> name of the datasource. Required
+ - name: Graphite
+ # <string, required> datasource type. Required
+ type: graphite
+ # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
+ access: proxy
+ # <int> org id. will default to orgId 1 if not specified
+ orgId: 1
+ # <string> url
+ url: http://localhost:8080
+ # <string> database password, if used
+ password:
+ # <string> database user, if used
+ user:
+ # <string> database name, if used
+ database:
+ # <bool> enable/disable basic auth
+ basicAuth:
+ # <string> basic auth username
+ basicAuthUser:
+ # <string> basic auth password
+ basicAuthPassword:
+ # <bool> enable/disable with credentials headers
+ withCredentials:
+ # <bool> mark as default datasource. Max one per org
+ isDefault:
+ # <map> fields that will be converted to json and stored in json_data
+ jsonData:
+ graphiteVersion: "1.1"
+ tlsAuth: true
+ tlsAuthWithCACert: true
+ # <string> json object of data that will be encrypted.
+ secureJsonData:
+ tlsCACert: "..."
+ tlsClientCert: "..."
+ tlsClientKey: "..."
+ version: 1
+ # <bool> allow users to edit datasources from the UI.
+ editable: false
+
+```
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/dashboards/custom-dashboard.json b/vnfs/DAaaS/deploy/visualization/charts/grafana/dashboards/custom-dashboard.json
new file mode 100755
index 00000000..9e26dfee
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/dashboards/custom-dashboard.json
@@ -0,0 +1 @@
+{} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/NOTES.txt b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/NOTES.txt
new file mode 100755
index 00000000..80c6d148
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/NOTES.txt
@@ -0,0 +1,34 @@
+1. Get your '{{ .Values.adminUser }}' user password by running:
+
+ kubectl get secret --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
+
+2. {{ if .Values.ingress.enabled }}
+ From outside the cluster, the server URL(s) are:
+{{- range .Values.ingress.hosts }}
+ http://{{ . }}
+{{- end }}
+{{ else }}
+ Get the Grafana URL to visit by running these commands in the same shell:
+{{ if contains "NodePort" .Values.service.type -}}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{ else if contains "LoadBalancer" .Values.service.type -}}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+     You can watch the status of it by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "grafana.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ http://$SERVICE_IP:{{ .Values.service.port -}}
+{{ else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "grafana.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 3000
+{{- end }}
+{{- end }}
+
+3. Login with the password from step 1 and the username: {{ .Values.adminUser }}
+
+{{- if not .Values.persistence.enabled }}
+#################################################################################
+###### WARNING: Persistence is disabled!!! You will lose your data when #####
+###### the Grafana pod is terminated. #####
+#################################################################################
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/_helpers.tpl b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/_helpers.tpl
new file mode 100755
index 00000000..3a3ebd3e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/_helpers.tpl
@@ -0,0 +1,43 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "grafana.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "grafana.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "grafana.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account
+*/}}
+{{- define "grafana.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "grafana.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/clusterrole.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/clusterrole.yaml
new file mode 100755
index 00000000..ccfc7237
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/clusterrole.yaml
@@ -0,0 +1,23 @@
+{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ template "grafana.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- with .Values.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+ name: {{ template "grafana.fullname" . }}-clusterrole
+{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }}
+rules:
+- apiGroups: [""] # "" indicates the core API group
+ resources: ["configmaps"]
+ verbs: ["get", "watch", "list"]
+{{- else }}
+rules: []
+{{- end}}
+{{- end}}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/clusterrolebinding.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/clusterrolebinding.yaml
new file mode 100755
index 00000000..0ffe9ff2
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/clusterrolebinding.yaml
@@ -0,0 +1,23 @@
+{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ template "grafana.fullname" . }}-clusterrolebinding
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ template "grafana.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- with .Values.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "grafana.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: {{ template "grafana.fullname" . }}-clusterrole
+ apiGroup: rbac.authorization.k8s.io
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/configmap-dashboard-provider.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/configmap-dashboard-provider.yaml
new file mode 100755
index 00000000..07717319
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/configmap-dashboard-provider.yaml
@@ -0,0 +1,26 @@
+{{- if .Values.sidecar.dashboards.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ template "grafana.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- with .Values.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+ name: {{ template "grafana.fullname" . }}-config-dashboards
+data:
+ provider.yaml: |-
+ apiVersion: 1
+ providers:
+ - name: 'default'
+ orgId: 1
+ folder: ''
+ type: file
+ disableDeletion: false
+ options:
+ path: {{ .Values.sidecar.dashboards.folder }}
+{{- end}}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/configmap.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/configmap.yaml
new file mode 100755
index 00000000..a2d05075
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/configmap.yaml
@@ -0,0 +1,71 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "grafana.fullname" . }}
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ template "grafana.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+data:
+{{- if .Values.plugins }}
+ plugins: {{ join "," .Values.plugins }}
+{{- end }}
+ grafana.ini: |
+{{- range $key, $value := index .Values "grafana.ini" }}
+ [{{ $key }}]
+ {{- range $elem, $elemVal := $value }}
+ {{ $elem }} = {{ $elemVal }}
+ {{- end }}
+{{- end }}
+
+{{- if .Values.datasources }}
+{{ $root := . }}
+ {{- range $key, $value := .Values.datasources }}
+ {{ $key }}: |
+{{ tpl (toYaml $value | indent 4) $root }}
+ {{- end -}}
+{{- end -}}
+
+{{- if .Values.notifiers }}
+ {{- range $key, $value := .Values.notifiers }}
+ {{ $key }}: |
+{{ toYaml $value | indent 4 }}
+ {{- end -}}
+{{- end -}}
+
+{{- if .Values.dashboardProviders }}
+ {{- range $key, $value := .Values.dashboardProviders }}
+ {{ $key }}: |
+{{ toYaml $value | indent 4 }}
+ {{- end -}}
+{{- end -}}
+
+{{- if .Values.dashboards }}
+ download_dashboards.sh: |
+ #!/usr/bin/env sh
+ set -euf
+ {{- if .Values.dashboardProviders }}
+ {{- range $key, $value := .Values.dashboardProviders }}
+ {{- range $value.providers }}
+ mkdir -p {{ .options.path }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+
+ {{- range $provider, $dashboards := .Values.dashboards }}
+ {{- range $key, $value := $dashboards }}
+ {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
+ curl -sk \
+ --connect-timeout 60 \
+ --max-time 60 \
+ {{- if not $value.b64content }}
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json;charset=UTF-8" \
+ {{- end }}
+ {{- if $value.url -}}{{ $value.url }}{{- else -}} https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download{{- end -}}{{ if $value.datasource }}| sed 's|\"datasource\":[^,]*|\"datasource\": \"{{ $value.datasource }}\"|g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \
+ > /var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json
+ {{- end -}}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/dashboards-json-configmap.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/dashboards-json-configmap.yaml
new file mode 100755
index 00000000..bd46addc
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/dashboards-json-configmap.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.dashboards }}
+{{ $files := .Files }}
+{{- range $provider, $dashboards := .Values.dashboards }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }}
+ labels:
+ app: {{ template "grafana.name" $ }}
+ chart: {{ template "grafana.chart" $ }}
+ release: {{ $.Release.Name }}
+ heritage: {{ $.Release.Service }}
+ dashboard-provider: {{ $provider }}
+data:
+{{- range $key, $value := $dashboards }}
+{{- if (or (hasKey $value "json") (hasKey $value "file")) }}
+{{ print $key | indent 2 }}.json: |-
+{{- if hasKey $value "json" }}
+{{ $value.json | indent 4 }}
+{{- end }}
+{{- if hasKey $value "file" }}
+{{ toYaml ( $files.Get $value.file ) | indent 4}}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/deployment.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/deployment.yaml
new file mode 100755
index 00000000..05225e40
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/deployment.yaml
@@ -0,0 +1,358 @@
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+ name: {{ template "grafana.fullname" . }}
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ template "grafana.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- with .Values.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+ replicas: {{ .Values.replicas }}
+ selector:
+ matchLabels:
+ app: {{ template "grafana.name" . }}
+ release: {{ .Release.Name }}
+ strategy:
+ type: {{ .Values.deploymentStrategy }}
+ {{- if ne .Values.deploymentStrategy "RollingUpdate" }}
+ rollingUpdate: null
+ {{- end }}
+ template:
+ metadata:
+ labels:
+ app: {{ template "grafana.name" . }}
+ release: {{ .Release.Name }}
+{{- with .Values.podAnnotations }}
+ annotations:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ spec:
+ serviceAccountName: {{ template "grafana.serviceAccountName" . }}
+{{- if .Values.schedulerName }}
+ schedulerName: "{{ .Values.schedulerName }}"
+{{- end }}
+{{- if .Values.securityContext }}
+ securityContext:
+{{ toYaml .Values.securityContext | indent 8 }}
+{{- end }}
+{{- if .Values.priorityClassName }}
+ priorityClassName: {{ .Values.priorityClassName }}
+{{- end }}
+{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.extraInitContainers) }}
+ initContainers:
+{{- end }}
+{{- if ( and .Values.persistence.enabled .Values.persistence.initChownData ) }}
+ - name: init-chown-data
+ image: "{{ .Values.chownDataImage.repository }}:{{ .Values.chownDataImage.tag }}"
+ imagePullPolicy: {{ .Values.chownDataImage.pullPolicy }}
+ securityContext:
+ runAsUser: 0
+ command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsUser }}", "/var/lib/grafana"]
+ volumeMounts:
+ - name: storage
+ mountPath: "/var/lib/grafana"
+{{- if .Values.persistence.subPath }}
+ subPath: {{ .Values.persistence.subPath }}
+{{- end }}
+{{- end }}
+{{- if .Values.dashboards }}
+ - name: download-dashboards
+ image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}"
+ imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }}
+ command: ["sh", "/etc/grafana/download_dashboards.sh"]
+ volumeMounts:
+ - name: config
+ mountPath: "/etc/grafana/download_dashboards.sh"
+ subPath: download_dashboards.sh
+ - name: storage
+ mountPath: "/var/lib/grafana"
+{{- if .Values.persistence.subPath }}
+ subPath: {{ .Values.persistence.subPath }}
+{{- end }}
+ {{- range .Values.extraSecretMounts }}
+ - name: {{ .name }}
+ mountPath: {{ .mountPath }}
+ readOnly: {{ .readOnly }}
+ {{- end }}
+{{- end }}
+{{- if .Values.sidecar.datasources.enabled }}
+ - name: {{ template "grafana.name" . }}-sc-datasources
+ image: "{{ .Values.sidecar.image }}"
+ imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
+ env:
+ - name: METHOD
+ value: LIST
+ - name: LABEL
+ value: "{{ .Values.sidecar.datasources.label }}"
+ - name: FOLDER
+ value: "/etc/grafana/provisioning/datasources"
+ {{- if .Values.sidecar.datasources.searchNamespace }}
+ - name: NAMESPACE
+ value: "{{ .Values.sidecar.datasources.searchNamespace }}"
+ {{- end }}
+ resources:
+{{ toYaml .Values.sidecar.resources | indent 12 }}
+ volumeMounts:
+ - name: sc-datasources-volume
+ mountPath: "/etc/grafana/provisioning/datasources"
+{{- end}}
+{{- if .Values.extraInitContainers }}
+{{ toYaml .Values.extraInitContainers | indent 8 }}
+{{- end }}
+ {{- if .Values.image.pullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.image.pullSecrets }}
+ - name: {{ . }}
+ {{- end}}
+ {{- end }}
+ containers:
+{{- if .Values.sidecar.dashboards.enabled }}
+ - name: {{ template "grafana.name" . }}-sc-dashboard
+ image: "{{ .Values.sidecar.image }}"
+ imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
+ env:
+ - name: LABEL
+ value: "{{ .Values.sidecar.dashboards.label }}"
+ - name: FOLDER
+ value: "{{ .Values.sidecar.dashboards.folder }}"
+ {{- if .Values.sidecar.dashboards.searchNamespace }}
+ - name: NAMESPACE
+ value: "{{ .Values.sidecar.dashboards.searchNamespace }}"
+ {{- end }}
+ resources:
+{{ toYaml .Values.sidecar.resources | indent 12 }}
+ volumeMounts:
+ - name: sc-dashboard-volume
+ mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
+{{- end}}
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ {{- if .Values.command }}
+ command:
+ {{- range .Values.command }}
+ - {{ . }}
+ {{- end }}
+ {{- end}}
+ volumeMounts:
+ - name: config
+ mountPath: "/etc/grafana/grafana.ini"
+ subPath: grafana.ini
+ {{- if not .Values.admin.existingSecret }}
+ - name: ldap
+ mountPath: "/etc/grafana/ldap.toml"
+ subPath: ldap.toml
+ {{- end }}
+ {{- range .Values.extraConfigmapMounts }}
+ - name: {{ .name }}
+ mountPath: {{ .mountPath }}
+ readOnly: {{ .readOnly }}
+ {{- end }}
+ - name: storage
+ mountPath: "/var/lib/grafana"
+{{- if .Values.persistence.subPath }}
+ subPath: {{ .Values.persistence.subPath }}
+{{- end }}
+{{- if .Values.dashboards }}
+ {{- range $provider, $dashboards := .Values.dashboards }}
+ {{- range $key, $value := $dashboards }}
+ {{- if (or (hasKey $value "json") (hasKey $value "file")) }}
+ - name: dashboards-{{ $provider }}
+ mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json"
+ subPath: "{{ $key }}.json"
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end -}}
+{{- if .Values.dashboardsConfigMaps }}
+ {{- range keys .Values.dashboardsConfigMaps }}
+ - name: dashboards-{{ . }}
+ mountPath: "/var/lib/grafana/dashboards/{{ . }}"
+ {{- end }}
+{{- end }}
+{{- if .Values.datasources }}
+ - name: config
+ mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml"
+ subPath: datasources.yaml
+{{- end }}
+{{- if .Values.notifiers }}
+ - name: config
+ mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml"
+ subPath: notifiers.yaml
+{{- end }}
+{{- if .Values.dashboardProviders }}
+ - name: config
+ mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml"
+ subPath: dashboardproviders.yaml
+{{- end }}
+{{- if .Values.sidecar.dashboards.enabled }}
+ - name: sc-dashboard-volume
+ mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
+ - name: sc-dashboard-provider
+ mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml"
+ subPath: provider.yaml
+{{- end}}
+{{- if .Values.sidecar.datasources.enabled }}
+ - name: sc-datasources-volume
+ mountPath: "/etc/grafana/provisioning/datasources"
+{{- end}}
+ {{- range .Values.extraSecretMounts }}
+ - name: {{ .name }}
+ mountPath: {{ .mountPath }}
+ readOnly: {{ .readOnly }}
+ {{- end }}
+ {{- range .Values.extraVolumeMounts }}
+ - name: {{ .name }}
+ mountPath: {{ .mountPath }}
+ subPath: {{ .subPath | default "" }}
+ readOnly: {{ .readOnly }}
+ {{- end }}
+ {{- range .Values.extraEmptyDirMounts }}
+ - name: {{ .name }}
+ mountPath: {{ .mountPath }}
+ {{- end }}
+ ports:
+ - name: service
+ containerPort: {{ .Values.service.port }}
+ protocol: TCP
+ - name: grafana
+ containerPort: 3000
+ protocol: TCP
+ env:
+ {{- if not .Values.env.GF_SECURITY_ADMIN_USER }}
+ - name: GF_SECURITY_ADMIN_USER
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
+ key: {{ .Values.admin.userKey | default "admin-user" }}
+ {{- end }}
+ {{- if not .Values.env.GF_SECURITY_ADMIN_PASSWORD }}
+ - name: GF_SECURITY_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
+ key: {{ .Values.admin.passwordKey | default "admin-password" }}
+ {{- end }}
+ {{- if .Values.plugins }}
+ - name: GF_INSTALL_PLUGINS
+ valueFrom:
+ configMapKeyRef:
+ name: {{ template "grafana.fullname" . }}
+ key: plugins
+ {{- end }}
+ {{- if .Values.smtp.existingSecret }}
+ - name: GF_SMTP_USER
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.smtp.existingSecret }}
+ key: {{ .Values.smtp.userKey | default "user" }}
+ - name: GF_SMTP_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.smtp.existingSecret }}
+ key: {{ .Values.smtp.passwordKey | default "password" }}
+ {{- end }}
+{{- range $key, $value := .Values.env }}
+ - name: "{{ $key }}"
+ value: "{{ $value }}"
+{{- end }}
+ {{- if .Values.envFromSecret }}
+ envFrom:
+ - secretRef:
+ name: {{ .Values.envFromSecret }}
+ {{- end }}
+ livenessProbe:
+{{ toYaml .Values.livenessProbe | indent 12 }}
+ readinessProbe:
+{{ toYaml .Values.readinessProbe | indent 12 }}
+ resources:
+{{ toYaml .Values.resources | indent 12 }}
+{{- if .Values.extraContainers }}
+{{ toYaml .Values.extraContainers | indent 8}}
+{{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ volumes:
+ - name: config
+ configMap:
+ name: {{ template "grafana.fullname" . }}
+ {{- range .Values.extraConfigmapMounts }}
+ - name: {{ .name }}
+ configMap:
+ name: {{ .configMap }}
+ {{- end }}
+ {{- if .Values.dashboards }}
+ {{- range keys .Values.dashboards }}
+ - name: dashboards-{{ . }}
+ configMap:
+ name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.dashboardsConfigMaps }}
+ {{ $root := . }}
+ {{- range $provider, $name := .Values.dashboardsConfigMaps }}
+ - name: dashboards-{{ $provider }}
+ configMap:
+ name: {{ tpl $name $root }}
+ {{- end }}
+ {{- end }}
+ {{- if not .Values.admin.existingSecret }}
+ - name: ldap
+ secret:
+ {{- if .Values.ldap.existingSecret }}
+ secretName: {{ .Values.ldap.existingSecret }}
+ {{- else }}
+ secretName: {{ template "grafana.fullname" . }}
+ {{- end }}
+ items:
+ - key: ldap-toml
+ path: ldap.toml
+ {{- end }}
+ - name: storage
+ {{- if .Values.persistence.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }}
+ {{- else }}
+ emptyDir: {}
+ {{- end -}}
+ {{- if .Values.sidecar.dashboards.enabled }}
+ - name: sc-dashboard-volume
+ emptyDir: {}
+ - name: sc-dashboard-provider
+ configMap:
+ name: {{ template "grafana.fullname" . }}-config-dashboards
+ {{- end }}
+ {{- if .Values.sidecar.datasources.enabled }}
+ - name: sc-datasources-volume
+ emptyDir: {}
+ {{- end -}}
+ {{- range .Values.extraSecretMounts }}
+ - name: {{ .name }}
+ secret:
+ secretName: {{ .secretName }}
+ defaultMode: {{ .defaultMode }}
+ {{- end }}
+ {{- range .Values.extraVolumeMounts }}
+ - name: {{ .name }}
+ persistentVolumeClaim:
+ claimName: {{ .existingClaim }}
+ {{- end }}
+ {{- range .Values.extraEmptyDirMounts }}
+ - name: {{ .name }}
+ emptyDir: {}
+ {{- end }}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/ingress.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/ingress.yaml
new file mode 100755
index 00000000..48973454
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/ingress.yaml
@@ -0,0 +1,42 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "grafana.fullname" . -}}
+{{- $servicePort := .Values.service.port -}}
+{{- $ingressPath := .Values.ingress.path -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ template "grafana.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.ingress.labels }}
+{{ toYaml .Values.ingress.labels | indent 4 }}
+{{- end }}
+{{- with .Values.ingress.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+{{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+{{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ . }}
+ http:
+ paths:
+ - path: {{ $ingressPath }}
+ backend:
+ serviceName: {{ $fullName }}
+ servicePort: {{ $servicePort }}
+ {{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/podsecuritypolicy.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/podsecuritypolicy.yaml
new file mode 100755
index 00000000..d3ef3644
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/podsecuritypolicy.yaml
@@ -0,0 +1,54 @@
+{{- if .Values.rbac.pspEnabled }}
+apiVersion: extensions/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: {{ template "grafana.fullname" . }}
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ {{- if .Values.rbac.pspUseAppArmor }}
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ {{- end }}
+spec:
+ privileged: false
+ allowPrivilegeEscalation: false
+ requiredDropCapabilities:
+ # Default set from Docker, without DAC_OVERRIDE or CHOWN
+ - FOWNER
+ - FSETID
+ - KILL
+ - SETGID
+ - SETUID
+ - SETPCAP
+ - NET_BIND_SERVICE
+ - NET_RAW
+ - SYS_CHROOT
+ - MKNOD
+ - AUDIT_WRITE
+ - SETFCAP
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ - 'persistentVolumeClaim'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'RunAsAny'
+ fsGroup:
+ rule: 'RunAsAny'
+ readOnlyRootFilesystem: false
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/pvc.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/pvc.yaml
new file mode 100755
index 00000000..e1cc0329
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/pvc.yaml
@@ -0,0 +1,24 @@
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ template "grafana.fullname" . }}
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ template "grafana.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ {{- with .Values.persistence.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+ {{- end }}
+spec:
+ accessModes:
+ {{- range .Values.persistence.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ storageClassName: {{ .Values.persistence.storageClassName }}
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/role.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/role.yaml
new file mode 100755
index 00000000..f6165694
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/role.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+ name: {{ template "grafana.fullname" . }}
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+{{- with .Values.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled)) }}
+rules:
+{{- if .Values.rbac.pspEnabled }}
+- apiGroups: ['extensions']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames: [{{ template "grafana.fullname" . }}]
+{{- end }}
+{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }}
+- apiGroups: [""] # "" indicates the core API group
+ resources: ["configmaps"]
+ verbs: ["get", "watch", "list"]
+{{- end }}
+{{- else }}
+rules: []
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/rolebinding.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/rolebinding.yaml
new file mode 100755
index 00000000..8b6671b8
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/rolebinding.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+ name: {{ template "grafana.fullname" . }}
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+{{- with .Values.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "grafana.fullname" . }}
+subjects:
+- kind: ServiceAccount
+ name: {{ template "grafana.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/secret.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/secret.yaml
new file mode 100755
index 00000000..a1ea5798
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/secret.yaml
@@ -0,0 +1,22 @@
+{{- if not .Values.admin.existingSecret }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ template "grafana.fullname" . }}
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ template "grafana.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+type: Opaque
+data:
+ admin-user: {{ .Values.adminUser | b64enc | quote }}
+ {{- if .Values.adminPassword }}
+ admin-password: {{ .Values.adminPassword | b64enc | quote }}
+ {{- else }}
+ admin-password: {{ randAlphaNum 40 | b64enc | quote }}
+ {{- end }}
+ {{- if not .Values.ldap.existingSecret }}
+ ldap-toml: {{ .Values.ldap.config | b64enc | quote }}
+ {{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/service.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/service.yaml
new file mode 100755
index 00000000..87fac70c
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/service.yaml
@@ -0,0 +1,49 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "grafana.fullname" . }}
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ template "grafana.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+{{- if .Values.service.labels }}
+{{ toYaml .Values.service.labels | indent 4 }}
+{{- end }}
+{{- with .Values.service.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
+spec:
+{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
+ type: ClusterIP
+ {{- if .Values.service.clusterIP }}
+ clusterIP: {{ .Values.service.clusterIP }}
+ {{end}}
+{{- else if eq .Values.service.type "LoadBalancer" }}
+ type: {{ .Values.service.type }}
+ {{- if .Values.service.loadBalancerIP }}
+ loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+ {{- end }}
+ {{- if .Values.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges:
+{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
+ {{- end -}}
+{{- else }}
+ type: {{ .Values.service.type }}
+{{- end }}
+{{- if .Values.service.externalIPs }}
+ externalIPs:
+{{ toYaml .Values.service.externalIPs | indent 4 }}
+{{- end }}
+ ports:
+ - name: service
+ port: {{ .Values.service.port }}
+ protocol: TCP
+ targetPort: {{ .Values.service.targetPort }}
+{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
+ nodePort: {{.Values.service.nodePort}}
+{{ end }}
+ selector:
+ app: {{ template "grafana.name" . }}
+ release: {{ .Release.Name }}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/serviceaccount.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/serviceaccount.yaml
new file mode 100755
index 00000000..04601d05
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/templates/serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app: {{ template "grafana.name" . }}
+ chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ name: {{ template "grafana.serviceAccountName" . }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/visualization/charts/grafana/values.yaml b/vnfs/DAaaS/deploy/visualization/charts/grafana/values.yaml
new file mode 100755
index 00000000..74b511cf
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/charts/grafana/values.yaml
@@ -0,0 +1,378 @@
+rbac:
+ create: true
+ pspEnabled: true
+ pspUseAppArmor: true
+ namespaced: false
+serviceAccount:
+ create: true
+ name:
+
+replicas: 1
+
+deploymentStrategy: RollingUpdate
+
+readinessProbe:
+ httpGet:
+ path: /api/health
+ port: 3000
+
+livenessProbe:
+ httpGet:
+ path: /api/health
+ port: 3000
+ initialDelaySeconds: 60
+ timeoutSeconds: 30
+ failureThreshold: 10
+
+image:
+ repository: grafana/grafana
+ tag: 6.0.2
+ pullPolicy: IfNotPresent
+
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+  # - myRegistryKeySecretName
+
+securityContext:
+ runAsUser: 472
+ fsGroup: 472
+
+
+extraConfigmapMounts: []
+ # - name: certs-configmap
+ # mountPath: /etc/grafana/ssl/
+ # configMap: certs-configmap
+ # readOnly: true
+
+
+extraEmptyDirMounts: []
+ # - name: provisioning-notifiers
+ # mountPath: /etc/grafana/provisioning/notifiers
+
+
+## Assign a PriorityClassName to pods if set
+# priorityClassName:
+
+downloadDashboardsImage:
+ repository: appropriate/curl
+ tag: latest
+ pullPolicy: IfNotPresent
+
+chownDataImage:
+ repository: busybox
+ tag: 1.30.0
+ pullPolicy: IfNotPresent
+
+## Pod Annotations
+# podAnnotations: {}
+
+## Deployment annotations
+# annotations: {}
+
+## Expose the Grafana service to be accessed from outside the cluster (LoadBalancer service)
+## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
+## ref: http://kubernetes.io/docs/user-guide/services/
+##
+service:
+ type: ClusterIP
+ port: 80
+ targetPort: 3000
+ # targetPort: 4181 To be used with a proxy extraContainer
+ annotations: {}
+ labels: {}
+
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ labels: {}
+ path: /
+ hosts:
+ - chart-example.local
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+# limits:
+# cpu: 100m
+# memory: 128Mi
+# requests:
+# cpu: 100m
+# memory: 128Mi
+
+## Node labels for pod assignment
+## ref: https://kubernetes.io/docs/user-guide/node-selection/
+#
+nodeSelector: {}
+
+## Tolerations for pod assignment
+## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+## Affinity for pod assignment
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: {}
+
+extraInitContainers: []
+
+## Enable and specify containers in extraContainers. This is meant to allow adding an authentication proxy to a Grafana pod
+extraContainers: |
+# - name: proxy
+# image: quay.io/gambol99/keycloak-proxy:latest
+# args:
+# - -provider=github
+# - -client-id=
+# - -client-secret=
+# - -github-org=<ORG_NAME>
+# - -email-domain=*
+# - -cookie-secret=
+# - -http-address=http://0.0.0.0:4181
+# - -upstream-url=http://127.0.0.1:3000
+# ports:
+# - name: proxy-web
+# containerPort: 4181
+
+## Enable persistence using Persistent Volume Claims
+## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+ enabled: false
+ initChownData: true
+ # storageClassName: default
+ accessModes:
+ - ReadWriteOnce
+ size: 10Gi
+ # annotations: {}
+ # subPath: ""
+ # existingClaim:
+
+# Administrator credentials when not using an existing secret (see below)
+adminUser: admin
+# adminPassword: strongpassword
+
+# Use an existing secret for the admin user.
+admin:
+ existingSecret: ""
+ userKey: admin-user
+ passwordKey: admin-password
+
+## Define command to be executed at startup by grafana container
+## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/)
+## Default is "run.sh" as defined in grafana's Dockerfile
+# command:
+# - "sh"
+# - "/run.sh"
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## Extra environment variables that will be passed onto the deployment pods
+env: {}
+
+## The name of a secret in the same Kubernetes namespace which contains values to be added to the environment
+## This can be useful for auth tokens, etc.
+envFromSecret: ""
+
+## Additional grafana server secret mounts
+# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
+extraSecretMounts: []
+ # - name: secret-files
+ # mountPath: /etc/secrets
+ # secretName: grafana-secret-files
+ # readOnly: true
+
+## Additional grafana server volume mounts
+# Defines additional volume mounts.
+extraVolumeMounts: []
+ # - name: extra-volume
+ # mountPath: /mnt/volume
+ # readOnly: true
+ # existingClaim: volume-claim
+
+## Pass the plugins you want installed as a list.
+##
+plugins: []
+ # - digrich-bubblechart-panel
+ # - grafana-clock-panel
+
+## Configure grafana datasources
+## ref: http://docs.grafana.org/administration/provisioning/#datasources
+##
+datasources:
+ datasources.yaml:
+ apiVersion: 1
+ datasources:
+ - name: Prometheus
+ type: prometheus
+ url: http://localhost:9090
+ isDefault: true
+ - name: M3DB
+ type: prometheus
+ url: http://m3coordinator-m3db-cluster.edge1.svc.cluster.local:7201
+ access: proxy
+
+## Configure notifiers
+## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
+##
+notifiers: {}
+# notifiers.yaml:
+# notifiers:
+# - name: email-notifier
+# type: email
+# uid: email1
+# # either:
+# org_id: 1
+# # or
+# org_name: Main Org.
+# is_default: true
+# settings:
+# addresses: an_email_address@example.com
+# delete_notifiers:
+
+## Configure grafana dashboard providers
+## ref: http://docs.grafana.org/administration/provisioning/#dashboards
+##
+## `path` must be /var/lib/grafana/dashboards/<provider_name>
+##
+dashboardProviders: {}
+# dashboardproviders.yaml:
+# apiVersion: 1
+# providers:
+# - name: 'default'
+# orgId: 1
+# folder: ''
+# type: file
+# disableDeletion: false
+# editable: true
+# options:
+# path: /var/lib/grafana/dashboards/default
+
+## Configure grafana dashboard to import
+## NOTE: To use dashboards you must also enable/configure dashboardProviders
+## ref: https://grafana.com/dashboards
+##
+## dashboards per provider, use provider name as key.
+##
+dashboards: {}
+ # default:
+ # some-dashboard:
+ # json: |
+ # $RAW_JSON
+ # custom-dashboard:
+ # file: dashboards/custom-dashboard.json
+ # prometheus-stats:
+ # gnetId: 2
+ # revision: 2
+ # datasource: Prometheus
+ # local-dashboard:
+ # url: https://example.com/repository/test.json
+ # local-dashboard-base64:
+ # url: https://example.com/repository/test-b64.json
+ # b64content: true
+
+## Reference to an external ConfigMap per provider. Use the provider name as key and the ConfigMap name as value.
+## A provider's dashboards must be defined either by external ConfigMaps or in values.yaml, but not in both.
+## ConfigMap data example:
+##
+## data:
+## example-dashboard.json: |
+## RAW_JSON
+##
+dashboardsConfigMaps: {}
+# default: ""
+
+## Grafana's primary configuration
+## NOTE: values in map will be converted to ini format
+## ref: http://docs.grafana.org/installation/configuration/
+##
+grafana.ini:
+ paths:
+ data: /var/lib/grafana/data
+ logs: /var/log/grafana
+ plugins: /var/lib/grafana/plugins
+ provisioning: /etc/grafana/provisioning
+ analytics:
+ check_for_updates: true
+ log:
+ mode: console
+ grafana_net:
+ url: https://grafana.net
+## LDAP Authentication can be enabled with the following values in grafana.ini
+## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
+ # auth.ldap:
+ # enabled: true
+ # allow_sign_up: true
+ # config_file: /etc/grafana/ldap.toml
+
+## Grafana's LDAP configuration
+## Templated by the template in _helpers.tpl
+## NOTE: To enable LDAP, grafana.ini must be configured with auth.ldap.enabled
+## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
+## ref: http://docs.grafana.org/installation/ldap/#configuration
+ldap:
+ # `existingSecret` is a reference to an existing secret containing the ldap configuration
+ # for Grafana in a key `ldap-toml`.
+ existingSecret: ""
+ # `config` is the content of `ldap.toml` that will be stored in the created secret
+ config: ""
+ # config: |-
+ # verbose_logging = true
+
+ # [[servers]]
+ # host = "my-ldap-server"
+ # port = 636
+ # use_ssl = true
+ # start_tls = false
+ # ssl_skip_verify = false
+ # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"
+
+## Grafana's SMTP configuration
+## NOTE: To enable, grafana.ini must be configured with smtp.enabled
+## ref: http://docs.grafana.org/installation/configuration/#smtp
+smtp:
+ # `existingSecret` is a reference to an existing secret containing the smtp configuration
+ # for Grafana.
+ existingSecret: ""
+ userKey: "user"
+ passwordKey: "password"
+
+## Sidecars that collect the ConfigMaps with the specified label and store the included files in the respective folders
+## Requires at least Grafana 5 to work and can't be used together with the dashboardProviders, datasources and dashboards parameters
+sidecar:
+ image: kiwigrid/k8s-sidecar:0.0.13
+ imagePullPolicy: IfNotPresent
+ resources: {}
+# limits:
+# cpu: 100m
+# memory: 100Mi
+# requests:
+# cpu: 50m
+# memory: 50Mi
+ dashboards:
+ enabled: false
+ # label that the configmaps with dashboards are marked with
+ label: grafana_dashboard
+ # folder in the pod that should hold the collected dashboards
+ folder: /tmp/dashboards
+ # If specified, the sidecar will search for dashboard config-maps inside this namespace.
+ # Otherwise the namespace in which the sidecar is running will be used.
+ # It's also possible to specify ALL to search in all namespaces
+ searchNamespace: null
+ datasources:
+ enabled: false
+ # label that the configmaps with datasources are marked with
+ label: grafana_datasource
+ # If specified, the sidecar will search for datasource config-maps inside this namespace.
+ # Otherwise the namespace in which the sidecar is running will be used.
+ # It's also possible to specify ALL to search in all namespaces
+ searchNamespace: null
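The grafana.ini and ldap sections above work together: turning on auth.ldap in grafana.ini points Grafana at /etc/grafana/ldap.toml, whose content is taken from ldap.config (or from ldap.existingSecret) through the ldap-toml secret key rendered by the chart. A minimal sketch of the extra values, layered on top of the defaults above, with placeholder host and bind DN:

grafana.ini:
  auth.ldap:
    enabled: true
    allow_sign_up: true
    config_file: /etc/grafana/ldap.toml
ldap:
  config: |-
    [[servers]]
    host = "ldap.example.org"                         # placeholder
    port = 636
    use_ssl = true
    bind_dn = "uid=%s,ou=users,dc=example,dc=org"     # placeholder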
diff --git a/vnfs/DAaaS/deploy/visualization/templates/NOTES.txt b/vnfs/DAaaS/deploy/visualization/templates/NOTES.txt
new file mode 100644
index 00000000..edd06657
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/templates/NOTES.txt
@@ -0,0 +1,37 @@
+
+*************************GRAFANA*********************************
+
+1. Get your '{{ .Values.adminUser }}' user password by running:
+
+ kubectl get secret --namespace {{ .Release.Namespace }} {{ .Release.Name }}-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
+
+2. {{ if .Values.ingress.enabled }}
+ From outside the cluster, the server URL(s) are:
+{{- range .Values.ingress.hosts }}
+ http://{{ . }}
+{{- end }}
+{{ else }}
+ Get the Grafana URL to visit by running these commands in the same shell:
+{{ if contains "NodePort" .Values.service.type -}}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{ else if contains "LoadBalancer" .Values.service.type -}}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "grafana.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ http://$SERVICE_IP:{{ .Values.service.port -}}
+{{ else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app=grafana,release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 3000
+{{- end }}
+{{- end }}
+
+3. Log in with the password from step 1 and the username: {{ .Values.adminUser }}
+
+{{- if not .Values.persistence.enabled }}
+#################################################################################
+###### WARNING: Persistence is disabled!!! You will lose your data when #####
+###### the Grafana pod is terminated. #####
+#################################################################################
+{{- end }}
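With the default ClusterIP service these notes reduce to fetching the admin password and port-forwarding to the Grafana pod. Assuming, purely for illustration, a release named visualization deployed into the edge1 namespace (the namespace already used by the M3DB datasource URL), the commands would look roughly like:

  kubectl get secret --namespace edge1 visualization-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
  export POD_NAME=$(kubectl get pods --namespace edge1 -l "app=grafana,release=visualization" -o jsonpath="{.items[0].metadata.name}")
  kubectl --namespace edge1 port-forward $POD_NAME 3000

Grafana is then reachable at http://localhost:3000, using the admin user and the password printed by the first command.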
diff --git a/vnfs/DAaaS/deploy/visualization/values.yaml b/vnfs/DAaaS/deploy/visualization/values.yaml
new file mode 100644
index 00000000..7e1d7438
--- /dev/null
+++ b/vnfs/DAaaS/deploy/visualization/values.yaml
@@ -0,0 +1,54 @@
+# Default values for visualization.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+adminUser: admin
+
+image:
+ repository: nginx
+ tag: stable
+ pullPolicy: IfNotPresent
+
+nameOverride: ""
+fullnameOverride: ""
+
+persistence:
+ enabled: false
+
+service:
+ type: ClusterIP
+ port: 80
+
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths: []
+
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources: {}
+ # We usually recommend not specifying default resources and leaving this as a conscious
+ # choice for the user. This also increases the chances that charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}