From e3e6c103f6e1cd1431ddacd23d500f39fbd1bcee Mon Sep 17 00:00:00 2001
From: Rajamohan Raj
Date: Tue, 4 Aug 2020 00:18:16 +0000
Subject: Updating m3db & m3db operator charts

Updated all the charts related to m3db and developed scripts for
deploying the m3db stack through the orchestrator.

Issue-ID: MULTICLOUD-1112
Signed-off-by: Rajamohan Raj
Change-Id: I42677809709fc4d12f16a156e563d6618a8f8437
---
 .../helm/prometheus-operator/add_m3db_remote.yaml  |   4 +-
 .../prometheus-operator/charts/grafana/values.yaml |   2 +-
 .../app2/helm/prometheus-operator/diff.txt         | 145 ---------------------
 .../app2/helm/prometheus-operator/values.yaml      |  33 ++---
 4 files changed, 21 insertions(+), 163 deletions(-)
 delete mode 100644 kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/diff.txt

(limited to 'kud/tests/vnfs/comp-app/collection/app2')

diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml
index c66cb131..f997309c 100644
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml
+++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml
@@ -1,6 +1,6 @@
 spec:
   remoteWrite:
-  - url: "http://m3coordinator-m3db-cluster.training.svc.cluster.local:7201/api/v1/prom/remote/write"
+  - url: "http://192.168.121.15:32701/api/v1/prom/remote/write"
     writeRelabelConfigs:
     - targetLabel: metrics_storage
-      replacement: m3db_remote
\ No newline at end of file
+      replacement: m3db_remote
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml
index d4ff7b30..a2642660 100755
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml
+++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml
@@ -61,7 +61,7 @@ image:
   #   - myRegistrKeySecretName
 
 testFramework:
-  enabled: true
+  enabled: false
   image: "bats/bats"
   tag: "v1.1.0"
   imagePullPolicy: IfNotPresent
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/diff.txt b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/diff.txt
deleted file mode 100644
index a0d0d3ed..00000000
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/diff.txt
+++ /dev/null
@@ -1,145 +0,0 @@
-8a9,12
-> ## Override the deployment namespace
-> ##
-> namespaceOverride: ""
->
-33a38
-> kubeApiserverSlos: true
-96c101
-< enabled: false
----
-> enabled: true
-107a113
-> annotations: {}
-168c174
-< # {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
----
-> # {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
-169a176,177
-> # {{ end }}
-> # {{ end }}
-317c325
-< ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
----
-> ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
-443a452
-> runAsGroup: 2000
-473c482,483
-< enabled: false
----
-> enabled: true
-> namespaceOverride: ""
-539c549
-< ## Configure additional grafana datasources
----
-> ## Configure additional grafana datasources (passed through tpl)
-552c562
-< # url: https://prometheus.svc:9090
----
-> # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
-641a652,660
-> ## Enable scraping /metrics/probes from kubelet's service
-> ##
-> probes: true
->
-> ## Enable scraping /metrics/resource from kubelet's service
-> ##
-> resource: true
-> # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource
-> resourcePath: "/metrics/resource/v1alpha1"
-655a675,688
-> ## Metric relabellings to apply to samples before ingestion
-> ##
-> probesMetricRelabelings: []
-> # - sourceLabels: [__name__, image]
-> #   separator: ;
-> #   regex: container_([a-z_]+);
-> #   replacement: $1
-> #   action: drop
-> # - sourceLabels: [__name__]
-> #   separator: ;
-> #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
-> #   replacement: $1
-> #   action: drop
->
-668a702,721
-> probesRelabelings:
-> - sourceLabels: [__metrics_path__]
->   targetLabel: metrics_path
-> # - sourceLabels: [__meta_kubernetes_pod_node_name]
-> #   separator: ;
-> #   regex: ^(.*)$
-> #   targetLabel: nodename
-> #   replacement: $1
-> #   action: replace
->
-> resourceRelabelings:
-> - sourceLabels: [__metrics_path__]
->   targetLabel: metrics_path
-> # - sourceLabels: [__meta_kubernetes_pod_node_name]
-> #   separator: ;
-> #   regex: ^(.*)$
-> #   targetLabel: nodename
-> #   replacement: $1
-> #   action: replace
->
-986c1039
-< enabled: false
----
-> enabled: true
-1011a1065
-> namespaceOverride: ""
-1020c1074
-< enabled: false
----
-> enabled: true
-1056a1111
-> namespaceOverride: ""
-1070a1126
-> # Only for prometheusOperator.image.tag < v0.39.0
-1094c1150
-< tag: v1.2.0
----
-> tag: v1.2.1
-1165c1221
-< cleanupCustomResource: true
----
-> cleanupCustomResource: false
-1255a1312,1313
-> fsGroup: 65534
-> runAsGroup: 65534
-1340c1398
-< type: NodePort
----
-> type: ClusterIP
-1510c1568
-< tag: v2.16.0
----
-> tag: v2.18.1
-1686c1744
-< ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
----
-> ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
-1750a1809,1813
-> # Additional volumes on the output StatefulSet definition.
-> volumes: []
-> # Additional VolumeMounts on the output StatefulSet definition.
-> volumeMounts: []
->
-1828a1892
-> runAsGroup: 2000
-1868,1879c1932
-< additionalServiceMonitors:
-< - name: service-monitor-cadvisor
-<   additionalLabels:
-<     collector: cadvisor
-<   jobLabel: cadvisor
-<   selector:
-<     matchLabels:
-<       app: cadvisor
-<   endpoints:
-<   - port: cadvisor-prometheus
-<     interval: 10s
-<     path: /metrics
----
-> additionalServiceMonitors: []
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml
index f296ca50..40de0165 100755
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml
+++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml
@@ -474,7 +474,7 @@ alertmanager:
 ## Using default values from https://github.com/helm/charts/blob/master/stable/grafana/values.yaml
 ##
 grafana:
-  enabled: false
+  enabled: true
 
   ## Deploy default dashboards.
   ##
@@ -589,7 +589,7 @@ grafana:
 ## Component scraping the kube api server
 ##
 kubeApiServer:
-  enabled: false
+  enabled: true
   tlsConfig:
     serverName: kubernetes
     insecureSkipVerify: false
@@ -626,7 +626,7 @@ kubeApiServer:
 ## Component scraping the kubelet and kubelet-hosted cAdvisor
 ##
 kubelet:
-  enabled: false
+  enabled: true
   namespace: kube-system
 
   serviceMonitor:
@@ -698,7 +698,7 @@ kubelet:
 ## Component scraping the kube controller manager
 ##
 kubeControllerManager:
-  enabled: false
+  enabled: true
 
   ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
   ##
@@ -751,7 +751,7 @@ kubeControllerManager:
 ## Component scraping coreDns. Use either this or kubeDns
 ##
 coreDns:
-  enabled: false
+  enabled: true
   service:
     port: 9153
     targetPort: 9153
@@ -831,7 +831,7 @@ kubeDns:
 ## Component scraping etcd
 ##
 kubeEtcd:
-  enabled: false
+  enabled: true
 
   ## If your etcd is not deployed as a pod, specify IPs it can be found on
   ##
@@ -891,7 +891,7 @@ kubeEtcd:
 ## Component scraping kube scheduler
 ##
 kubeScheduler:
-  enabled: false
+  enabled: true
 
   ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
   ##
@@ -944,7 +944,7 @@ kubeScheduler:
 ## Component scraping kube proxy
 ##
 kubeProxy:
-  enabled: false
+  enabled: true
 
   ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
   ##
@@ -1731,7 +1731,7 @@ prometheus:
     ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
     remoteWrite:
     # - url: http://remote1/push
-    - url: "http://m3coordinator-m3db-cluster.training.svc.cluster.local:7201/api/v1/prom/remote/write"
+    - url: "http://192.168.121.15:32701/api/v1/prom/remote/write"
      writeRelabelConfigs:
      - targetLabel: metrics_storage
        replacement: m3db_remote
@@ -1769,13 +1769,16 @@ prometheus:
     ## The scrape configuraiton example below will find master nodes, provided they have the name .*mst.*, relabel the
     ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
     ##
-    additionalScrapeConfigs:
-    - job_name: 'm3db'
-      static_configs:
-      - targets: ['m3db-cluster-rep0-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004','m3db-cluster-rep1-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004', 'm3db-cluster-rep2-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004']
-    - job_name: 'm3coordinator'
+    additionalScrapeConfigs: 
+    - job_name: 'm3'
       static_configs:
-      - targets: ['m3db-cluster-rep0-0.m3dbnode-m3db-cluster.training.svc.cluster.local:7203']
+      - targets: ['192.168.121.15:32701']
+    #- job_name: 'm3db'
+    #static_configs:
+    #- targets: ['m3db-cluster-rep0-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004','m3db-cluster-rep1-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004', 'm3db-cluster-rep2-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004']
+    #- job_name: 'm3coordinator'
+    #static_configs:
+    #- targets: ['m3db-cluster-rep0-0.m3dbnode-m3db-cluster.training.svc.cluster.local:7203']
     # - job_name: kube-etcd
     #   kubernetes_sd_configs:
     #     - role: node
-- 
cgit 1.2.3-korg
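
Below is a minimal sanity-check sketch for the remote-write path configured above; it is not part of the patch. It assumes the NodePort address 192.168.121.15:32701 used in add_m3db_remote.yaml also exposes m3coordinator's Prometheus-compatible query API, and that Prometheus has already pushed samples carrying the metrics_storage=m3db_remote label set by the writeRelabelConfigs.

# check_m3_remote_write.py -- hypothetical helper, not shipped with this change.
# Assumption: the coordinator's Prometheus-compatible /api/v1/query endpoint is
# reachable on the same NodePort (192.168.121.15:32701) as the remote-write URL.
import json
import urllib.parse
import urllib.request

COORDINATOR = "http://192.168.121.15:32701"      # address taken from this patch
QUERY = 'up{metrics_storage="m3db_remote"}'      # label added by writeRelabelConfigs

url = COORDINATOR + "/api/v1/query?" + urllib.parse.urlencode({"query": QUERY})
with urllib.request.urlopen(url, timeout=10) as resp:
    result = json.load(resp).get("data", {}).get("result", [])

# A non-empty result means Prometheus samples are reaching M3DB via m3coordinator.
print(f"{len(result)} series found")
print(json.dumps(result[:3], indent=2))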