diff options
56 files changed, 2935 insertions, 35 deletions
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml new file mode 100644 index 00000000..c66cb131 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml @@ -0,0 +1,6 @@ +spec: + remoteWrite: + - url: "http://m3coordinator-m3db-cluster.training.svc.cluster.local:7201/api/v1/prom/remote/write" + writeRelabelConfigs: + - targetLabel: metrics_storage + replacement: m3db_remote
\ No newline at end of file diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml index 25ea7888..d4ff7b30 100755 --- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml +++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml @@ -116,6 +116,7 @@ podPortName: grafana service: type: NodePort port: 80 + nodePort: 30092 targetPort: 3000 # targetPort: 4181 To be used with a proxy extraContainer annotations: {} diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml index 15c3fa22..d493e23e 100755 --- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml +++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml @@ -474,7 +474,7 @@ alertmanager: ## Using default values from https://github.com/helm/charts/blob/master/stable/grafana/values.yaml ## grafana: - enabled: false + enabled: true ## Deploy default dashboards. ## @@ -1723,15 +1723,21 @@ prometheus: ## The remote_read spec configuration for Prometheus. ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotereadspec remoteRead: [] - # - url: http://remote1/read + # # - url: http://remote1/read + # - url: "http://m3coordinator-m3db-cluster.training.svc.cluster.local:7201/api/v1/prom/remote/read" + # read_recent: true ## The remote_write spec configuration for Prometheus. 
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotewritespec - remoteWrite: [] + remoteWrite: # - url: http://remote1/push + - url: "http://m3coordinator-m3db-cluster.training.svc.cluster.local:7201/api/v1/prom/remote/write" + writeRelabelConfigs: + - targetLabel: metrics_storage + replacement: m3db_remote ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature - remoteWriteDashboards: false + remoteWriteDashboards: true ## Resource limits & requests ## @@ -1763,7 +1769,13 @@ prometheus: ## The scrape configuraiton example below will find master nodes, provided they have the name .*mst.*, relabel the ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes ## - additionalScrapeConfigs: [] + additionalScrapeConfigs: + - job_name: 'm3db' + static_configs: + - targets: ['m3db-cluster-rep0-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004','m3db-cluster-rep1-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004', 'm3db-cluster-rep2-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004'] + - job_name: 'm3coordinator' + static_configs: + - targets: ['m3db-cluster-rep0-0.m3dbnode-m3db-cluster.training.svc.cluster.local:7203'] # - job_name: kube-etcd # kubernetes_sd_configs: # - role: node diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/.helmignore b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/Chart.yaml b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/Chart.yaml new file mode 100644 index 00000000..10d9d542 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/Chart.yaml @@ -0,0 +1,3 @@ +apiVersion: v1 +name: m3db +version: 0.1.1 diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/NOTES.txt b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/NOTES.txt new file mode 100644 index 00000000..ee7ee3d7 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/NOTES.txt @@ -0,0 +1 @@ +M3DB Cluster {{ .Values.m3dbCluster.name }} has been created
\ No newline at end of file diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/_helpers.tpl b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/_helpers.tpl new file mode 100644 index 00000000..36544b12 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "m3db.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "m3db.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "m3db.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/configmap.yaml b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/configmap.yaml new file mode 100644 index 00000000..358e201e --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/configmap.yaml @@ -0,0 +1,218 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.m3dbCluster.configMapName }} +data: + m3.yml: |+ + coordinator: + listenAddress: + type: "config" + value: "0.0.0.0:7201" + metrics: + scope: + prefix: "coordinator" + prometheus: + handlerPath: /metrics + listenAddress: 0.0.0.0:7203 + sanitization: prometheus + samplingRate: 1.0 + extended: none + tagOptions: + idScheme: quoted + local: + namespaces: + - namespace: "default" + type: unaggregated + retention: 48h + db: + logging: + level: info + + metrics: + prometheus: + handlerPath: /metrics + sanitization: prometheus + samplingRate: 1.0 + extended: detailed + + listenAddress: 0.0.0.0:9000 + clusterListenAddress: 0.0.0.0:9001 + httpNodeListenAddress: 0.0.0.0:9002 + httpClusterListenAddress: 0.0.0.0:9003 + debugListenAddress: 0.0.0.0:9004 + + hostID: + resolver: file + file: + path: /etc/m3db/pod-identity/identity + timeout: 5m + + client: + writeConsistencyLevel: majority + readConsistencyLevel: unstrict_majority + writeTimeout: 10s + fetchTimeout: 15s + connectTimeout: 20s + writeRetry: + initialBackoff: 500ms + backoffFactor: 3 + maxRetries: 2 + jitter: true + fetchRetry: + initialBackoff: 500ms + backoffFactor: 2 + maxRetries: 3 + jitter: true + backgroundHealthCheckFailLimit: 4 + backgroundHealthCheckFailThrottleFactor: 0.5 + + gcPercentage: 100 + + writeNewSeriesAsync: true + writeNewSeriesLimitPerSecond: 1048576 + writeNewSeriesBackoffDuration: 2ms + + bootstrap: + bootstrappers: + - filesystem + - commitlog + - peers + - 
uninitialized_topology + fs: + numProcessorsPerCPU: 0.125 + + commitlog: + flushMaxBytes: 524288 + flushEvery: 1s + queue: + calculationType: fixed + size: 2097152 + blockSize: 10m + + fs: + filePathPrefix: /var/lib/m3db + writeBufferSize: 65536 + dataReadBufferSize: 65536 + infoReadBufferSize: 128 + seekReadBufferSize: 4096 + throughputLimitMbps: 100.0 + throughputCheckEvery: 128 + + repair: + enabled: false + interval: 2h + offset: 30m + jitter: 1h + throttle: 2m + checkInterval: 1m + + pooling: + blockAllocSize: 16 + type: simple + seriesPool: + size: 262144 + lowWatermark: 0.7 + highWatermark: 1.0 + blockPool: + size: 262144 + lowWatermark: 0.7 + highWatermark: 1.0 + encoderPool: + size: 262144 + lowWatermark: 0.7 + highWatermark: 1.0 + closersPool: + size: 104857 + lowWatermark: 0.7 + highWatermark: 1.0 + contextPool: + size: 262144 + lowWatermark: 0.7 + highWatermark: 1.0 + segmentReaderPool: + size: 16384 + lowWatermark: 0.7 + highWatermark: 1.0 + iteratorPool: + size: 2048 + lowWatermark: 0.7 + highWatermark: 1.0 + fetchBlockMetadataResultsPool: + size: 65536 + capacity: 32 + lowWatermark: 0.7 + highWatermark: 1.0 + fetchBlocksMetadataResultsPool: + size: 32 + capacity: 4096 + lowWatermark: 0.7 + highWatermark: 1.0 + hostBlockMetadataSlicePool: + size: 131072 + capacity: 3 + lowWatermark: 0.7 + highWatermark: 1.0 + blockMetadataPool: + size: 65536 + lowWatermark: 0.7 + highWatermark: 1.0 + blockMetadataSlicePool: + size: 65536 + capacity: 32 + lowWatermark: 0.7 + highWatermark: 1.0 + blocksMetadataPool: + size: 65536 + lowWatermark: 0.7 + highWatermark: 1.0 + blocksMetadataSlicePool: + size: 32 + capacity: 4096 + lowWatermark: 0.7 + highWatermark: 1.0 + identifierPool: + size: 262144 + lowWatermark: 0.7 + highWatermark: 1.0 + bytesPool: + buckets: + - capacity: 16 + size: 524288 + lowWatermark: 0.7 + highWatermark: 1.0 + - capacity: 32 + size: 262144 + lowWatermark: 0.7 + highWatermark: 1.0 + - capacity: 64 + size: 131072 + lowWatermark: 0.7 + 
highWatermark: 1.0 + - capacity: 128 + size: 65536 + lowWatermark: 0.7 + highWatermark: 1.0 + - capacity: 256 + size: 65536 + lowWatermark: 0.7 + highWatermark: 1.0 + - capacity: 1440 + size: 16384 + lowWatermark: 0.7 + highWatermark: 1.0 + - capacity: 4096 + size: 8192 + lowWatermark: 0.7 + highWatermark: 1.0 + config: + service: + env: default_env + zone: embedded + service: m3db + cacheDir: /var/lib/m3kv + etcdClusters: + - zone: embedded + endpoints: + - http://etcd-0.etcd:2380 + - http://etcd-1.etcd:2380 + - http://etcd-2.etcd:2380
\ No newline at end of file diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/etcd-cluster.yaml b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/etcd-cluster.yaml new file mode 100644 index 00000000..802354bf --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/etcd-cluster.yaml @@ -0,0 +1,20 @@ +apiVersion: "etcd.database.coreos.com/v1beta2" +kind: EtcdCluster +metadata: + name: {{ .Release.Name }}-{{ .Values.etcdCluster.name }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "m3db.name" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + etcd.database.coreos.com/scope: clusterwide +spec: + size: {{ .Values.etcdCluster.size }} + version: "{{ .Values.etcdCluster.version }}" + pod: +{{ toYaml .Values.etcdCluster.pod | indent 4 }} + {{- if .Values.etcdCluster.enableTLS }} + TLS: +{{ toYaml .Values.etcdCluster.tls | indent 4 }} + {{- end }} diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/m3dbcluster.yaml b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/m3dbcluster.yaml new file mode 100644 index 00000000..8ce16a74 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/m3dbcluster.yaml @@ -0,0 +1,26 @@ +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +metadata: + name: {{ .Values.m3dbCluster.name }} +spec: + image: {{ .Values.m3dbCluster.image.repository }}:{{ .Values.m3dbCluster.image.tag }} + replicationFactor: {{ .Values.m3dbCluster.replicationFactor }} + numberOfShards: {{ .Values.m3dbCluster.numberOfShards }} + etcdEndpoints: + - http://{{ .Release.Name }}-{{ .Values.etcdCluster.name }}:2379 + isolationGroups: +{{ toYaml .Values.m3dbCluster.isolationGroups | indent 4 }} + tolerations: +{{ toYaml .Values.m3dbCluster.tolerations | indent 4 }} + namespaces: +{{ toYaml .Values.m3dbCluster.namespaces | indent 4 }} + configMapName: {{ .Values.m3dbCluster.configMapName }} 
+ resources: + requests: + memory: 4Gi + cpu: '1' + limits: + memory: 12Gi + cpu: '4' + + diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/values.yaml b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/values.yaml new file mode 100644 index 00000000..515fd2ad --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/values.yaml @@ -0,0 +1,52 @@ +m3dbCluster: + name: m3db-cluster + image: + repository: quay.io/m3db/m3dbnode + tag: v0.10.2 + replicationFactor: 3 + numberOfShards: 256 + isolationGroups: + - name: us-west1-a + numInstances: 1 + - name: us-west1-b + numInstances: 1 + - name: us-west1-c + numInstances: 1 + namespaces: + - name: collectd + preset: 10s:2d + configMapName: m3-configuration + tolerations: {} + +etcdCluster: + name: etcd + size: 3 + version: 3.4.3 + image: + repository: quay.io/coreos/etcd + tag: v3.4.3 + pullPolicy: Always + enableTLS: false + # TLS configs + tls: + static: + member: + peerSecret: etcd-peer-tls + serverSecret: etcd-server-tls + operatorSecret: etcd-client-tls + ## etcd cluster pod specific values + ## Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-members-cluster-with-resource-requirement + pod: + ## Antiaffinity for etcd pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + antiAffinity: false + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + ## Node labels for etcd pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/.helmignore b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/Chart.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/Chart.yaml new file mode 100644 index 00000000..01c1eb03 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A collection of operator Helm charts. +name: operator +version: 0.1.0 diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/.helmignore b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/Chart.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/Chart.yaml new file mode 100755 index 00000000..d0ea8910 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +description: CoreOS etcd-operator Helm chart for Kubernetes +name: etcd-operator +version: 0.10.0 +appVersion: 0.9.4 +home: https://github.com/coreos/etcd-operator +icon: https://raw.githubusercontent.com/coreos/etcd/master/logos/etcd-horizontal-color.png +sources: +- https://github.com/coreos/etcd-operator +maintainers: +- name: lachie83 + email: lachlan@deis.com +- name: alejandroEsc + email: jaescobar.cell@gmail.com diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/OWNERS b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/OWNERS new file mode 100644 index 00000000..1385151c --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/OWNERS @@ -0,0 +1,6 @@ +approvers: +- lachie83 +- alejandroEsc +reviewers: +- lachie83 +- alejandroEsc diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/README.md b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/README.md new file mode 100644 index 00000000..417b19b4 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/README.md @@ -0,0 +1,169 @@ +# CoreOS etcd-operator + +[etcd-operator](https://coreos.com/blog/introducing-the-etcd-operator.html) 
Simplify etcd cluster +configuration and management. + +__DISCLAIMER:__ While this chart has been well-tested, the etcd-operator is still currently in beta. +Current project status is available [here](https://github.com/coreos/etcd-operator). + +## Introduction + +This chart bootstraps an etcd-operator and allows the deployment of etcd-cluster(s). + +## Official Documentation + +Official project documentation found [here](https://github.com/coreos/etcd-operator) + +## Prerequisites + +- Kubernetes 1.4+ with Beta APIs enabled +- __Suggested:__ PV provisioner support in the underlying infrastructure to support backups + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install stable/etcd-operator --name my-release +``` + +Note that by default chart installs etcd operator only. If you want to also deploy `etcd` cluster, enable `customResources.createEtcdClusterCRD` flag: +```bash +$ helm install --name my-release --set customResources.createEtcdClusterCRD=true stable/etcd-operator +``` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components EXCEPT the persistent volume. + +## Updating +Once you have a new chart version, you can update your deployment with: +``` +$ helm upgrade my-release stable/etcd-operator +``` + +Example resizing etcd cluster from `3` to `5` nodes during helm upgrade: +```bash +$ helm upgrade my-release --set etcdCluster.size=5 --set customResources.createEtcdClusterCRD=true stable/etcd-operator +``` + +## Configuration + +The following table lists the configurable parameters of the etcd-operator chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------------------------- | -------------------------------------------------------------------- | ---------------------------------------------- | +| `rbac.create` | Install required RBAC service account, roles and rolebindings | `true` | +| `rbac.apiVersion` | RBAC api version `v1alpha1\|v1beta1` | `v1beta1` | +| `serviceAccount.create` | Flag to create the service account | `true` | +| `serviceAccount.name` | Name of the service account resource when RBAC is enabled | `etcd-operator-sa` | +| `deployments.etcdOperator` | Deploy the etcd cluster operator | `true` | +| `deployments.backupOperator` | Deploy the etcd backup operator | `true` | +| `deployments.restoreOperator` | Deploy the etcd restore operator | `true` | +| `customResources.createEtcdClusterCRD` | Create a custom resource: EtcdCluster | `false` | +| `customResources.createBackupCRD` | Create an a custom resource: EtcdBackup | `false` | +| `customResources.createRestoreCRD` | Create an a custom resource: EtcdRestore | `false` | +| `etcdOperator.name` | Etcd Operator name | `etcd-operator` | +| `etcdOperator.replicaCount` | Number of operator replicas to create (only 1 is supported) | `1` | +| `etcdOperator.image.repository` | etcd-operator container image | `quay.io/coreos/etcd-operator` | +| `etcdOperator.image.tag` | etcd-operator container image tag | `v0.9.3` | +| `etcdOperator.image.pullpolicy` | etcd-operator container image pull policy | `Always` | +| `etcdOperator.resources.cpu` | CPU limit per etcd-operator pod | `100m` | +| `etcdOperator.resources.memory` | Memory limit per etcd-operator pod | `128Mi` | +| `etcdOperator.securityContext` | SecurityContext for etcd operator | `{}` | +| `etcdOperator.nodeSelector` | Node labels for etcd operator pod assignment | `{}` | +| `etcdOperator.podAnnotations` | Annotations for the etcd operator pod | `{}` | +| `etcdOperator.commandArgs` | Additional command arguments | `{}` | +| 
`backupOperator.name` | Backup operator name | `etcd-backup-operator` | +| `backupOperator.replicaCount` | Number of operator replicas to create (only 1 is supported) | `1` | +| `backupOperator.image.repository` | Operator container image | `quay.io/coreos/etcd-operator` | +| `backupOperator.image.tag` | Operator container image tag | `v0.9.3` | +| `backupOperator.image.pullpolicy` | Operator container image pull policy | `Always` | +| `backupOperator.resources.cpu` | CPU limit per etcd-operator pod | `100m` | +| `backupOperator.resources.memory` | Memory limit per etcd-operator pod | `128Mi` | +| `backupOperator.securityContext` | SecurityContext for etcd backup operator | `{}` | +| `backupOperator.spec.storageType` | Storage to use for backup file, currently only S3 supported | `S3` | +| `backupOperator.spec.s3.s3Bucket` | Bucket in S3 to store backup file | | +| `backupOperator.spec.s3.awsSecret` | Name of kubernetes secret containing aws credentials | | +| `backupOperator.nodeSelector` | Node labels for etcd operator pod assignment | `{}` | +| `backupOperator.commandArgs` | Additional command arguments | `{}` | +| `restoreOperator.name` | Restore operator name | `etcd-backup-operator` | +| `restoreOperator.replicaCount` | Number of operator replicas to create (only 1 is supported) | `1` | +| `restoreOperator.image.repository` | Operator container image | `quay.io/coreos/etcd-operator` | +| `restoreOperator.image.tag` | Operator container image tag | `v0.9.3` | +| `restoreOperator.image.pullpolicy` | Operator container image pull policy | `Always` | +| `restoreOperator.resources.cpu` | CPU limit per etcd-operator pod | `100m` | +| `restoreOperator.resources.memory` | Memory limit per etcd-operator pod | `128Mi` | +| `restoreOperator.securityContext` | SecurityContext for etcd restore operator | `{}` | +| `restoreOperator.spec.s3.path` | Path in S3 bucket containing the backup file | | +| `restoreOperator.spec.s3.awsSecret` | Name of kubernetes secret containing 
aws credentials | | +| `restoreOperator.nodeSelector` | Node labels for etcd operator pod assignment | `{}` | +| `restoreOperator.commandArgs` | Additional command arguments | `{}` | +| `etcdCluster.name` | etcd cluster name | `etcd-cluster` | +| `etcdCluster.size` | etcd cluster size | `3` | +| `etcdCluster.version` | etcd cluster version | `3.2.25` | +| `etcdCluster.image.repository` | etcd container image | `quay.io/coreos/etcd-operator` | +| `etcdCluster.image.tag` | etcd container image tag | `v3.2.25` | +| `etcdCluster.image.pullPolicy` | etcd container image pull policy | `Always` | +| `etcdCluster.enableTLS` | Enable use of TLS | `false` | +| `etcdCluster.tls.static.member.peerSecret` | Kubernetes secret containing TLS peer certs | `etcd-peer-tls` | +| `etcdCluster.tls.static.member.serverSecret` | Kubernetes secret containing TLS server certs | `etcd-server-tls` | +| `etcdCluster.tls.static.operatorSecret` | Kubernetes secret containing TLS client certs | `etcd-client-tls` | +| `etcdCluster.pod.antiAffinity` | Whether etcd cluster pods should have an antiAffinity | `false` | +| `etcdCluster.pod.resources.limits.cpu` | CPU limit per etcd cluster pod | `100m` | +| `etcdCluster.pod.resources.limits.memory` | Memory limit per etcd cluster pod | `128Mi` | +| `etcdCluster.pod.resources.requests.cpu` | CPU request per etcd cluster pod | `100m` | +| `etcdCluster.pod.resources.requests.memory` | Memory request per etcd cluster pod | `128Mi` | +| `etcdCluster.pod.nodeSelector` | Node labels for etcd cluster pod assignment | `{}` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: + +```bash +$ helm install --name my-release --set image.tag=v0.2.1 stable/etcd-operator +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while +installing the chart. 
For example: + +```bash +$ helm install --name my-release --values values.yaml stable/etcd-operator +``` + +## RBAC +By default the chart will install the recommended RBAC roles and rolebindings. + +To determine if your cluster supports this running the following: + +```console +$ kubectl api-versions | grep rbac +``` + +You also need to have the following parameter on the api server. See the following document for how to enable [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/) + +``` +--authorization-mode=RBAC +``` + +If the output contains "beta" or both "alpha" and "beta" you can may install rbac by default, if not, you may turn RBAC off as described below. + +### RBAC role/rolebinding creation + +RBAC resources are enabled by default. To disable RBAC do the following: + +```console +$ helm install --name my-release stable/etcd-operator --set rbac.create=false +``` + +### Changing RBAC manifest apiVersion + +By default the RBAC resources are generated with the "v1beta1" apiVersion. To use "v1alpha1" do the following: + +```console +$ helm install --name my-release stable/etcd-operator --set rbac.install=true,rbac.apiVersion=v1alpha1 +``` diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/NOTES.txt b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/NOTES.txt new file mode 100644 index 00000000..30d7ec0f --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/NOTES.txt @@ -0,0 +1,27 @@ +{{- if .Values.customResources.createEtcdClusterCRD -}} +1. Watch etcd cluster start + kubectl get pods -l etcd_cluster={{ .Values.etcdCluster.name }} --namespace {{ .Release.Namespace }} -w + +2. 
Confirm etcd cluster is healthy + $ kubectl run --rm -i --tty --env="ETCDCTL_API=3" --env="ETCDCTL_ENDPOINTS=http://{{ .Values.etcdCluster.name }}-client:2379" --namespace {{ .Release.Namespace }} etcd-test --image quay.io/coreos/etcd --restart=Never -- /bin/sh -c 'watch -n1 "etcdctl member list"' + +3. Interact with the cluster! + $ kubectl run --rm -i --tty --env ETCDCTL_API=3 --namespace {{ .Release.Namespace }} etcd-test --image quay.io/coreos/etcd --restart=Never -- /bin/sh + / # etcdctl --endpoints http://{{ .Values.etcdCluster.name }}-client:2379 put foo bar + / # etcdctl --endpoints http://{{ .Values.etcdCluster.name }}-client:2379 get foo + OK + (ctrl-D to exit) + +4. Optional + Check the etcd-operator logs + export POD=$(kubectl get pods -l app={{ template "etcd-operator.fullname" . }} --namespace {{ .Release.Namespace }} --output name) + kubectl logs $POD --namespace={{ .Release.Namespace }} + +{{- else -}} +1. etcd-operator deployed. + If you would like to deploy an etcd-cluster set 'customResources.createEtcdClusterCRD' to true in values.yaml + Check the etcd-operator logs + export POD=$(kubectl get pods -l app={{ template "etcd-operator.fullname" . }} --namespace {{ .Release.Namespace }} --output name) + kubectl logs $POD --namespace={{ .Release.Namespace }} + +{{- end -}} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/_helpers.tpl b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/_helpers.tpl new file mode 100644 index 00000000..e4076835 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/_helpers.tpl @@ -0,0 +1,53 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "etcd-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "etcd-operator.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.etcdOperator.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "etcd-backup-operator.name" -}} +{{- default .Chart.Name .Values.backupOperator.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "etcd-backup-operator.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.backupOperator.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "etcd-restore-operator.name" -}} +{{- default .Chart.Name .Values.restoreOperator.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "etcd-restore-operator.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.restoreOperator.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the etcd-operator service account to use +*/}} +{{- define "etcd-operator.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "etcd-operator.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/backup-etcd-crd.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/backup-etcd-crd.yaml new file mode 100644 index 00000000..5528f766 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/backup-etcd-crd.yaml @@ -0,0 +1,18 @@ +{{- if .Values.customResources.createBackupCRD }} +--- +apiVersion: "etcd.database.coreos.com/v1beta2" +kind: "EtcdBackup" +metadata: + name: {{ template "etcd-backup-operator.fullname" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "etcd-backup-operator.name" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": "post-install" + "helm.sh/hook-delete-policy": "before-hook-creation" +spec: + clusterName: {{ .Values.etcdCluster.name }} +{{ toYaml .Values.backupOperator.spec | indent 2 }} +{{- end}}
\ No newline at end of file diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/backup-operator-deployment.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/backup-operator-deployment.yaml new file mode 100644 index 00000000..8b8d51b0 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/backup-operator-deployment.yaml @@ -0,0 +1,60 @@ +{{- if .Values.deployments.backupOperator }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "etcd-backup-operator.fullname" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "etcd-backup-operator.name" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ template "etcd-backup-operator.fullname" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.backupOperator.replicaCount }} + template: + metadata: + name: {{ template "etcd-backup-operator.fullname" . }} + labels: + app: {{ template "etcd-backup-operator.fullname" . }} + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ template "etcd-operator.serviceAccountName" . 
}} + containers: + - name: {{ .Values.backupOperator.name }} + image: "{{ .Values.backupOperator.image.repository }}:{{ .Values.backupOperator.image.tag }}" + imagePullPolicy: {{ .Values.backupOperator.image.pullPolicy }} + command: + - etcd-backup-operator +{{- range $key, $value := .Values.backupOperator.commandArgs }} + - "--{{ $key }}={{ $value }}" +{{- end }} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + resources: + limits: + cpu: {{ .Values.backupOperator.resources.cpu }} + memory: {{ .Values.backupOperator.resources.memory }} + requests: + cpu: {{ .Values.backupOperator.resources.cpu }} + memory: {{ .Values.backupOperator.resources.memory }} + {{- if .Values.backupOperator.nodeSelector }} + nodeSelector: {{ toYaml .Values.backupOperator.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.backupOperator.securityContext }} + securityContext: {{ toYaml .Values.backupOperator.securityContext | nindent 8 }} + {{- end }} + {{- if .Values.backupOperator.tolerations }} + tolerations: {{ toYaml .Values.backupOperator.tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/etcd-cluster-crd.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/etcd-cluster-crd.yaml new file mode 100644 index 00000000..93ee84c5 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/etcd-cluster-crd.yaml @@ -0,0 +1,51 @@ +{{- if .Values.deployments.etcdOperator }} +# Synced with https://github.com/coreos/etcd-operator/blob/master/pkg/util/k8sutil/crd.go +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: etcdclusters.etcd.database.coreos.com + labels: + chart: "{{ .Chart.Name }}-{{ 
.Chart.Version }}" + app: {{ template "etcd-operator.name" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + helm.sh/hook: crd-install + helm.sh/hook-delete-policy: before-hook-creation +spec: + group: etcd.database.coreos.com + scope: Namespaced + version: v1beta2 + names: + kind: EtcdCluster + listKind: EtcdClusterList + singular: etcdcluster + plural: etcdclusters + shortNames: + - etcd +{{- end }} +{{- if .Values.customResources.createEtcdClusterCRD }} +--- +apiVersion: "etcd.database.coreos.com/v1beta2" +kind: "EtcdCluster" +metadata: + name: {{ .Values.etcdCluster.name }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "etcd-operator.name" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": "post-install" + "helm.sh/hook-delete-policy": "before-hook-creation" +spec: + size: {{ .Values.etcdCluster.size }} + version: "{{ .Values.etcdCluster.version }}" + pod: +{{ toYaml .Values.etcdCluster.pod | indent 4 }} + {{- if .Values.etcdCluster.enableTLS }} + TLS: +{{ toYaml .Values.etcdCluster.tls | indent 4 }} + {{- end }} +{{- end }} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-cluster-role.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-cluster-role.yaml new file mode 100644 index 00000000..62085978 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-cluster-role.yaml @@ -0,0 +1,49 @@ +{{- if .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }} +kind: ClusterRole +metadata: + name: {{ template "etcd-operator.fullname" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "etcd-operator.name" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: + - etcd.database.coreos.com + resources: + - etcdclusters + - etcdbackups + - etcdrestores + verbs: + - "*" +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - "*" +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + verbs: + - "*" +- apiGroups: + - apps + resources: + - deployments + verbs: + - "*" +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +{{- end }} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-clusterrole-binding.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-clusterrole-binding.yaml new file mode 100644 index 00000000..09594ccc --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-clusterrole-binding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.rbac.create .Values.deployments.etcdOperator }} +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/{{ required "A valid .Values.rbac.apiVersion entry required!" .Values.rbac.apiVersion }} +metadata: + name: {{ template "etcd-operator.fullname" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "etcd-operator.name" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +subjects: +- kind: ServiceAccount + name: {{ template "etcd-operator.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "etcd-operator.fullname" . 
}} +{{- end }} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-deployment.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-deployment.yaml new file mode 100644 index 00000000..dc50d46e --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-deployment.yaml @@ -0,0 +1,83 @@ +{{- if .Values.deployments.etcdOperator }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "etcd-operator.fullname" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "etcd-operator.name" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ template "etcd-operator.fullname" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.etcdOperator.replicaCount }} + template: + metadata: + name: {{ template "etcd-operator.fullname" . }} + labels: + app: {{ template "etcd-operator.fullname" . }} + release: {{ .Release.Name }} + annotations: {{ toYaml .Values.etcdOperator.podAnnotations | nindent 8}} + spec: + serviceAccountName: {{ template "etcd-operator.serviceAccountName" . }} + containers: + - name: {{ template "etcd-operator.fullname" . 
}} + image: "{{ .Values.etcdOperator.image.repository }}:{{ .Values.etcdOperator.image.tag }}" + imagePullPolicy: {{ .Values.etcdOperator.image.pullPolicy }} + command: + - etcd-operator +{{- range $key, $value := .Values.etcdOperator.commandArgs }} + - "--{{ $key }}={{ $value }}" +{{- end }} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + resources: + limits: + cpu: {{ .Values.etcdOperator.resources.cpu }} + memory: {{ .Values.etcdOperator.resources.memory }} + requests: + cpu: {{ .Values.etcdOperator.resources.cpu }} + memory: {{ .Values.etcdOperator.resources.memory }} + {{- if .Values.etcdOperator.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /readyz + port: 8080 + initialDelaySeconds: {{ .Values.etcdOperator.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.etcdOperator.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.etcdOperator.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.etcdOperator.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.etcdOperator.livenessProbe.failureThreshold }} + {{- end}} + {{- if .Values.etcdOperator.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /readyz + port: 8080 + initialDelaySeconds: {{ .Values.etcdOperator.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.etcdOperator.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.etcdOperator.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.etcdOperator.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.etcdOperator.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.etcdOperator.nodeSelector }} + nodeSelector: {{ toYaml .Values.etcdOperator.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.etcdOperator.securityContext }} + securityContext: {{ toYaml .Values.etcdOperator.securityContext | nindent 8 }} + {{- end 
}} + {{- if .Values.etcdOperator.tolerations }} + tolerations: {{ toYaml .Values.etcdOperator.tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-service-account.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-service-account.yaml new file mode 100644 index 00000000..423be9c4 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/operator-service-account.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.serviceAccount.create .Values.deployments.etcdOperator }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "etcd-operator.serviceAccountName" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "etcd-operator.name" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }} +{{- end }} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/restore-etcd-crd.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/restore-etcd-crd.yaml new file mode 100644 index 00000000..73faaab8 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/restore-etcd-crd.yaml @@ -0,0 +1,28 @@ +{{- if .Values.customResources.createRestoreCRD }} +--- +apiVersion: "etcd.database.coreos.com/v1beta2" +kind: "EtcdRestore" +metadata: + # An EtcdCluster with the same name will be created + name: {{ .Values.etcdCluster.name }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "etcd-restore-operator.name" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + "helm.sh/hook": "post-install" + "helm.sh/hook-delete-policy": "before-hook-creation" +spec: + clusterSpec: + size: {{ .Values.etcdCluster.size }} + baseImage: "{{ .Values.etcdCluster.image.repository }}" + version: {{ .Values.etcdCluster.image.tag }} + pod: +{{ toYaml .Values.etcdCluster.pod | indent 6 }} + {{- if .Values.etcdCluster.enableTLS }} + TLS: +{{ toYaml .Values.etcdCluster.tls | indent 6 }} + {{- end }} +{{ toYaml .Values.restoreOperator.spec | indent 2 }} +{{- end}}
\ No newline at end of file diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/restore-operator-deployment.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/restore-operator-deployment.yaml new file mode 100644 index 00000000..ce27f6f8 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/restore-operator-deployment.yaml @@ -0,0 +1,64 @@ +{{- if .Values.deployments.restoreOperator }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "etcd-restore-operator.fullname" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "etcd-restore-operator.name" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ template "etcd-restore-operator.name" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.restoreOperator.replicaCount }} + template: + metadata: + name: {{ template "etcd-restore-operator.fullname" . }} + labels: + app: {{ template "etcd-restore-operator.name" . }} + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ template "etcd-operator.serviceAccountName" . 
}} + containers: + - name: {{ .Values.restoreOperator.name }} + image: "{{ .Values.restoreOperator.image.repository }}:{{ .Values.restoreOperator.image.tag }}" + imagePullPolicy: {{ .Values.restoreOperator.image.pullPolicy }} + ports: + - containerPort: {{ .Values.restoreOperator.port }} + command: + - etcd-restore-operator +{{- range $key, $value := .Values.restoreOperator.commandArgs }} + - "--{{ $key }}={{ $value }}" +{{- end }} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SERVICE_ADDR + value: "{{ .Values.restoreOperator.name }}:{{ .Values.restoreOperator.port }}" + resources: + limits: + cpu: {{ .Values.restoreOperator.resources.cpu }} + memory: {{ .Values.restoreOperator.resources.memory }} + requests: + cpu: {{ .Values.restoreOperator.resources.cpu }} + memory: {{ .Values.restoreOperator.resources.memory }} + {{- if .Values.restoreOperator.nodeSelector }} + nodeSelector: {{ toYaml .Values.restoreOperator.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.restoreOperator.securityContext }} + securityContext: {{ toYaml .Values.restoreOperator.securityContext | nindent 8 }} + {{- end }} + {{- if .Values.restoreOperator.tolerations }} + tolerations: {{ toYaml .Values.restoreOperator.tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/restore-operator-service.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/restore-operator-service.yaml new file mode 100644 index 00000000..052be364 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/templates/restore-operator-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.deployments.restoreOperator }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ 
.Values.restoreOperator.name }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ template "etcd-restore-operator.name" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + ports: + - protocol: TCP + name: http-etcd-restore-port + port: {{ .Values.restoreOperator.port }} + selector: + app: {{ template "etcd-restore-operator.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/values.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/values.yaml new file mode 100644 index 00000000..e698f338 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/etcd-operator/values.yaml @@ -0,0 +1,162 @@ +# Default values for etcd-operator. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Enable etcd-operator +# To be used from parent operator package requirements.yaml +enabled: true + +global: + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + +## Install Default RBAC roles and bindings +rbac: + create: true + apiVersion: v1 + +## Service account name and whether to create it +serviceAccount: + create: true + name: + +# Select what to deploy +deployments: + etcdOperator: true + # one time deployment, delete once completed, + # Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/backup-operator.md + backupOperator: false + # one time deployment, delete once completed + # Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/restore-operator.md + restoreOperator: false + +# creates custom resources, not all required, +# you could use `helm template --values <values.yaml> --name 
release_name ... ` +# and create the resources yourself to deploy on your cluster later +customResources: + createEtcdClusterCRD: false + createBackupCRD: false + createRestoreCRD: false + +# etcdOperator +etcdOperator: + name: etcd-operator + replicaCount: 1 + image: + repository: quay.io/coreos/etcd-operator + tag: v0.9.4 + pullPolicy: Always + resources: + cpu: 100m + memory: 128Mi + ## Node labels for etcd-operator pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + ## additional command arguments go here; will be translated to `--key=value` form + ## e.g., analytics: true + commandArgs: {} + ## Configurable health checks against the /readyz endpoint that etcd-operator exposes + readinessProbe: + enabled: false + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + livenessProbe: + enabled: false + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 +# backup spec +backupOperator: + name: etcd-backup-operator + replicaCount: 1 + image: + repository: quay.io/coreos/etcd-operator + tag: v0.9.4 + pullPolicy: Always + resources: + cpu: 100m + memory: 128Mi + spec: + storageType: S3 + s3: + s3Bucket: + awsSecret: + ## Node labels for etcd pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + ## additional command arguments go here; will be translated to `--key=value` form + ## e.g., analytics: true + commandArgs: {} + securityContext: {} + tolerations: [] + +# restore spec +restoreOperator: + name: etcd-restore-operator + replicaCount: 1 + image: + repository: quay.io/coreos/etcd-operator + tag: v0.9.4 + pullPolicy: Always + port: 19999 + resources: + cpu: 100m + memory: 128Mi + spec: + s3: + # The format of "path" must be: "<s3-bucket-name>/<path-to-backup-file>" + # e.g: "etcd-snapshot-bucket/v1/default/example-etcd-cluster/3.2.10_0000000000000001_etcd.backup" + 
path: + awsSecret: + ## Node labels for etcd pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + ## additional command arguments go here; will be translated to `--key=value` form + ## e.g., analytics: true + commandArgs: {} + securityContext: {} + tolerations: [] + +## etcd-cluster specific values +etcdCluster: + name: etcd-cluster + size: 3 + version: 3.3.3 + image: + repository: quay.io/coreos/etcd + tag: v3.3.3 + pullPolicy: Always + enableTLS: false + # TLS configs + tls: + static: + member: + peerSecret: etcd-peer-tls + serverSecret: etcd-server-tls + operatorSecret: etcd-client-tls + ## etcd cluster pod specific values + ## Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-members-cluster-with-resource-requirement + pod: + ## Antiaffinity for etcd pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + antiAffinity: false + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + ## Node labels for etcd pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + securityContext: {} + tolerations: [] diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/Chart.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/Chart.yaml new file mode 100644 index 00000000..ebdc0b40 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/Chart.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +name: m3db-operator +version: 0.1.3 +# TODO(PS) - helm has issues with GKE's SemVer +# Error: Chart requires kubernetesVersion: >=1.10.6 which is incompatible with Kubernetes v1.10.7-gke.2 +# +#kubeVersion: ">=1.10.7" +description: Kubernetes operator for M3DB timeseries database +keywords: + - operator + - m3 +home: 
https://github.com/m3db/m3db-operator +sources: + - https://github.com/m3db/m3db-operator +maintainers: + - name: m3 Authors + email: m3db@googlegroups.com + url: https://operator.m3db.io/ +engine: gotpl +icon: https://raw.githubusercontent.com/m3db/m3/master/docs/theme/assets/images/M3-logo.png +appVersion: ">0.4.7" +tillerVersion: ">=2.11.0" diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/LICENSE b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/NOTES.txt b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/NOTES.txt new file mode 100644 index 00000000..ca4143db --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/NOTES.txt @@ -0,0 +1,12 @@ + ___ _ _ + / _ \ _ __ ___ _ __ __ _| |_ ___ _ __ | |__ __ _ ___ +| | | | '_ \ / _ \ '__/ _` | __/ _ \| '__| | '_ \ / _` / __| +| |_| | |_) | __/ | | (_| | || (_) | | | | | | (_| \__ \ + \___/| .__/ \___|_| \__,_|\__\___/|_| |_| |_|\__,_|___/ + |_| + _ _ _ _ _ _ +| |__ ___ ___ _ __ (_)_ __ ___| |_ __ _| | | ___ __| | +| '_ \ / _ \/ _ \ '_ \ | | '_ \/ __| __/ _` | | |/ _ \/ _` | +| |_) | __/ __/ | | | | | | | \__ \ || (_| | | | __/ (_| | +|_.__/ \___|\___|_| |_| |_|_| |_|___/\__\__,_|_|_|\___|\__,_| + diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/README.md b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/README.md new file mode 100644 index 00000000..0a532d31 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/README.md @@ -0,0 +1,14 @@ +### Helm Charts for M3DB clusters on Kubernetes + +### Prerequisite + +[Install 
helm](https://docs.helm.sh/using_helm/#installing-helm) + +### Installing m3db-operator chart + +``` +cd helm/m3db-operator +helm package . +helm install m3db-operator-0.0.1.tgz +``` + diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/cluster_role.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/cluster_role.yaml new file mode 100644 index 00000000..7bf41739 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/cluster_role.yaml @@ -0,0 +1,35 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ .Values.operator.name }} +rules: +- apiGroups: ["extensions"] + resources: ["deployments", "replicasets", "daemonsets"] + verbs: ["create", "get", "update", "delete", "list"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "get", "update", "delete", "list"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "create", "delete", "deletecollection"] +- apiGroups: [""] + resources: ["persistentvolumes", "persistentvolumeclaims", "services", "secrets", "configmaps"] + verbs: ["create", "get", "update", "delete", "list"] +- apiGroups: ["batch"] + resources: ["cronjobs", "jobs"] + verbs: ["create", "get", "deletecollection", "delete"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["list", "get", "watch", "update"] +- apiGroups: ["apps"] + resources: ["statefulsets", "deployments"] + verbs: ["*"] +- apiGroups: ["operator.m3db.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/cluster_role_binding.yaml 
b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/cluster_role_binding.yaml new file mode 100644 index 00000000..876a6705 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/cluster_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ .Values.operator.name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Values.operator.name }} +subjects: +- kind: ServiceAccount + name: {{ .Values.operator.name }} + namespace: {{ .Release.Namespace }} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/service_account.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/service_account.yaml new file mode 100644 index 00000000..a65e90bc --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/service_account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.operator.name }} + namespace: {{ .Release.Namespace }} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/stateful_set.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/stateful_set.yaml new file mode 100644 index 00000000..d1002378 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/templates/stateful_set.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ .Values.operator.name }} + namespace: {{ .Release.Namespace }} +spec: + serviceName: {{ .Values.operator.name }} + replicas: 1 + selector: + matchLabels: + name: {{ .Values.operator.name }} + template: + metadata: + labels: + name: 
{{ .Values.operator.name }} + spec: + containers: + - name: {{ .Values.operator.name }} + image: {{ .Values.image.repository}}:{{ .Values.image.tag }} + command: + - m3db-operator + imagePullPolicy: Always + env: + - name: ENVIRONMENT + value: {{ .Values.environment }} + serviceAccount: {{ .Values.operator.name }} diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/values.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/values.yaml new file mode 100644 index 00000000..8411d77e --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/charts/m3db-operator/values.yaml @@ -0,0 +1,6 @@ +operator: + name: m3db-operator +image: + repository: quay.io/m3db/m3db-operator + tag: v0.1.3 +environment: production diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/requirements.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/requirements.yaml new file mode 100644 index 00000000..8635dc4d --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: etcd-operator + condition: etcd-operator.enabled + # - name: visualization-operator + # condition: visualization-operator.enabled diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/resources/m3db.labels b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/resources/m3db.labels new file mode 100644 index 00000000..4f1ddd53 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/resources/m3db.labels @@ -0,0 +1,7 @@ +kubectl label node/otconap6 failure-domain.beta.kubernetes.io/region=us-west1 +kubectl label node/otconap11 failure-domain.beta.kubernetes.io/region=us-west1 +kubectl label node/otccloud02 failure-domain.beta.kubernetes.io/region=us-west1 + +kubectl label node/otconap6 
failure-domain.beta.kubernetes.io/zone=us-west1-a --overwrite=true +kubectl label node/otconap11 failure-domain.beta.kubernetes.io/zone=us-west1-b --overwrite=true +kubectl label node/otccloud02 failure-domain.beta.kubernetes.io/zone=us-west1-c --overwrite=true
\ No newline at end of file diff --git a/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/values.yaml b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/values.yaml new file mode 100644 index 00000000..071fa9a0 --- /dev/null +++ b/kud/tests/vnfs/comp-app/collection/etcd-m3db-operators/helm/operator/values.yaml @@ -0,0 +1,38 @@ +# Copyright © 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +################################################################# +# Global configuration defaults. +################################################################# +global: + nodePortPrefix: 310 + repository: nexus3.onap.org:10001 + readinessRepository: oomk8s + readinessImage: readiness-check:2.0.0 + loggingRepository: docker.elastic.co + loggingImage: beats/filebeat:5.5.0 + +################################################################# +# k8s Operator Day-0 configuration defaults. 
+################################################################# + + +################################################################# +# Enable or disable components +################################################################# + +etcd-operator: + enabled: true + #visualization-operator: + #enabled: true diff --git a/src/orchestrator/pkg/grpc/installappclient/client.go b/src/orchestrator/pkg/grpc/installappclient/client.go new file mode 100644 index 00000000..4c652a84 --- /dev/null +++ b/src/orchestrator/pkg/grpc/installappclient/client.go @@ -0,0 +1,66 @@ +/* +Copyright 2020 Intel Corporation. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package installappclient + +import ( + "context" + "time" + + log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils" + "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/rpc" + installpb "github.com/onap/multicloud-k8s/src/rsync/pkg/grpc/installapp" + pkgerrors "github.com/pkg/errors" +) + +// InvokeInstallApp will make the grpc call to the resource synchronizer +// or rsync controller. +// rsync will deply the resources in the app context to the clusters as +// prepared in the app context. 
+func InvokeInstallApp(appContextId string) error { + var err error + var rpcClient installpb.InstallappClient + var installRes *installpb.InstallAppResponse + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + conn := rpc.GetRpcConn("rsync") + + if conn != nil { + rpcClient = installpb.NewInstallappClient(conn) + installReq := new(installpb.InstallAppRequest) + installReq.AppContext = appContextId + installRes, err = rpcClient.InstallApp(ctx, installReq) + if err == nil { + log.Info("Response from InstappApp GRPC call", log.Fields{ + "Succeeded": installRes.AppContextInstalled, + "Message": installRes.AppContextInstallMessage, + }) + } + } else { + return pkgerrors.Errorf("InstallApp Failed - Could not get InstallAppClient: %v", "rsync") + } + + if err == nil { + if installRes.AppContextInstalled { + log.Info("InstallApp Success", log.Fields{ + "AppContext": appContextId, + "Message": installRes.AppContextInstallMessage, + }) + return nil + } else { + return pkgerrors.Errorf("InstallApp Failed: %v", installRes.AppContextInstallMessage) + } + } + return err +} diff --git a/src/orchestrator/pkg/module/instantiation.go b/src/orchestrator/pkg/module/instantiation.go index 76be2a2d..043b80f2 100644 --- a/src/orchestrator/pkg/module/instantiation.go +++ b/src/orchestrator/pkg/module/instantiation.go @@ -18,7 +18,9 @@ package module import ( "encoding/base64" + "encoding/json" "fmt" + gpic "github.com/onap/multicloud-k8s/src/orchestrator/pkg/gpic" "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/db" log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils" @@ -200,11 +202,20 @@ func (c InstantiationClient) Instantiate(p string, ca string, v string, di strin ctxval := cca.ctxval compositeHandle := cca.compositeAppHandle - var appOrder []string + var appOrderInstr struct { + Apporder []string `json:"apporder"` + } + + var appDepInstr struct { + Appdep map[string]string `json:"appdependency"` + } + appdep 
:= make(map[string]string) // Add composite app using appContext for _, eachApp := range allApps { - appOrder = append(appOrder, eachApp.Metadata.Name) + appOrderInstr.Apporder = append(appOrderInstr.Apporder, eachApp.Metadata.Name) + appdep[eachApp.Metadata.Name] = "go" + sortedTemplates, err := GetSortedTemplateForApp(eachApp.Metadata.Name, p, ca, v, rName, cp, overrideValues) if err != nil { @@ -250,7 +261,11 @@ func (c InstantiationClient) Instantiate(p string, ca string, v string, di strin } } - context.AddInstruction(compositeHandle, "app", "order", appOrder) + jappOrderInstr, _ := json.Marshal(appOrderInstr) + appDepInstr.Appdep = appdep + jappDepInstr, _ := json.Marshal(appDepInstr) + context.AddInstruction(compositeHandle, "app", "order", string(jappOrderInstr)) + context.AddInstruction(compositeHandle, "app", "dependency", string(jappDepInstr)) //END: storing into etcd // BEGIN:: save the context in the orchestrator db record @@ -300,6 +315,10 @@ func (c InstantiationClient) Instantiate(p string, ca string, v string, di strin // END: Scheduler code // BEGIN : Rsync code + err = callRsync(ctxval) + if err != nil { + return err + } // END : Rsyc code log.Info(":: Done with instantiation... 
::", log.Fields{"CompositeAppName": ca}) diff --git a/src/orchestrator/pkg/module/instantiation_appcontext_helper.go b/src/orchestrator/pkg/module/instantiation_appcontext_helper.go index 1734a0c8..43ddd6df 100644 --- a/src/orchestrator/pkg/module/instantiation_appcontext_helper.go +++ b/src/orchestrator/pkg/module/instantiation_appcontext_helper.go @@ -22,19 +22,21 @@ It contains methods for creating appContext, saving cluster and resource details */ import ( + "encoding/json" + "io/ioutil" + "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext" gpic "github.com/onap/multicloud-k8s/src/orchestrator/pkg/gpic" log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils" "github.com/onap/multicloud-k8s/src/orchestrator/utils" "github.com/onap/multicloud-k8s/src/orchestrator/utils/helm" pkgerrors "github.com/pkg/errors" - "io/ioutil" ) // resource consists of name of reource type resource struct { name string - filecontent []byte + filecontent string } type contextForCompositeApp struct { @@ -81,17 +83,27 @@ func getResources(st []helm.KubernetesResourceTemplate) ([]resource, error) { } n := yamlStruct.Metadata.Name + SEPARATOR + yamlStruct.Kind - resources = append(resources, resource{name: n, filecontent: yamlFile}) + resources = append(resources, resource{name: n, filecontent: string(yamlFile)}) log.Info(":: Added resource into resource-order ::", log.Fields{"ResourceName": n}) } return resources, nil } -func addResourcesToCluster(ct appcontext.AppContext, ch interface{}, resources []resource, resourceOrder []string) error { +func addResourcesToCluster(ct appcontext.AppContext, ch interface{}, resources []resource) error { + + var resOrderInstr struct { + Resorder []string `json:"resorder"` + } + + var resDepInstr struct { + Resdep map[string]string `json:"resdependency"` + } + resdep := make(map[string]string) for _, resource := range resources { - resourceOrder = append(resourceOrder, resource.name) + resOrderInstr.Resorder = 
append(resOrderInstr.Resorder, resource.name) + resdep[resource.name] = "go" _, err := ct.AddResource(ch, resource.name, resource.filecontent) if err != nil { cleanuperr := ct.DeleteCompositeApp() @@ -100,7 +112,11 @@ func addResourcesToCluster(ct appcontext.AppContext, ch interface{}, resources [ } return pkgerrors.Wrapf(err, "Error adding resource ::%s to AppContext", resource.name) } - _, err = ct.AddInstruction(ch, "resource", "order", resourceOrder) + jresOrderInstr, _ := json.Marshal(resOrderInstr) + resDepInstr.Resdep = resdep + jresDepInstr, _ := json.Marshal(resDepInstr) + _, err = ct.AddInstruction(ch, "resource", "order", string(jresOrderInstr)) + _, err = ct.AddInstruction(ch, "resource", "dependency", string(jresDepInstr)) if err != nil { cleanuperr := ct.DeleteCompositeApp() if cleanuperr != nil { @@ -120,7 +136,6 @@ func addClustersToAppContext(l gpic.ClusterList, ct appcontext.AppContext, appHa for _, c := range mc { p := c.ProviderName n := c.ClusterName - var resourceOrder []string clusterhandle, err := ct.AddCluster(appHandle, p+SEPARATOR+n) if err != nil { cleanuperr := ct.DeleteCompositeApp() @@ -130,7 +145,7 @@ func addClustersToAppContext(l gpic.ClusterList, ct appcontext.AppContext, appHa return pkgerrors.Wrapf(err, "Error adding Cluster(provider::%s and name::%s) to AppContext", p, n) } - err = addResourcesToCluster(ct, clusterhandle, resources, resourceOrder) + err = addResourcesToCluster(ct, clusterhandle, resources) if err != nil { return pkgerrors.Wrapf(err, "Error adding Resources to Cluster(provider::%s and name::%s) to AppContext", p, n) } @@ -144,7 +159,6 @@ func addClustersToAppContext(l gpic.ClusterList, ct appcontext.AppContext, appHa p := eachCluster.ProviderName n := eachCluster.ClusterName - var resourceOrder []string clusterhandle, err := ct.AddCluster(appHandle, p+SEPARATOR+n) if err != nil { @@ -164,7 +178,7 @@ func addClustersToAppContext(l gpic.ClusterList, ct appcontext.AppContext, appHa return pkgerrors.Wrapf(err, 
"Error adding Cluster(provider::%s and name::%s) to AppContext", p, n) } - err = addResourcesToCluster(ct, clusterhandle, resources, resourceOrder) + err = addResourcesToCluster(ct, clusterhandle, resources) if err != nil { return pkgerrors.Wrapf(err, "Error adding Resources to Cluster(provider::%s, name::%s and groupName:: %s) to AppContext", p, n, gn) } diff --git a/src/orchestrator/pkg/module/instantiation_scheduler_helper.go b/src/orchestrator/pkg/module/instantiation_scheduler_helper.go index e4bbbfac..3d9d851c 100644 --- a/src/orchestrator/pkg/module/instantiation_scheduler_helper.go +++ b/src/orchestrator/pkg/module/instantiation_scheduler_helper.go @@ -23,6 +23,7 @@ import ( "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext" client "github.com/onap/multicloud-k8s/src/orchestrator/pkg/grpc/contextupdateclient" + rsyncclient "github.com/onap/multicloud-k8s/src/orchestrator/pkg/grpc/installappclient" log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils" "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module/controller" mtypes "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module/types" @@ -191,6 +192,18 @@ func callGrpcForControllerList(cl []controller.Controller, mc map[string]string, } /* +callRsync method shall take in the app context id and invokes the rsync service via grpc +*/ +func callRsync(contextid interface{}) error { + appContextID := fmt.Sprintf("%v", contextid) + err := rsyncclient.InvokeInstallApp(appContextID) + if err != nil { + return err + } + return nil +} + +/* deleteExtraClusters method shall delete the extra cluster handles for each AnyOf cluster present in the etcd after the grpc call for context updation. 
*/ func deleteExtraClusters(apps []App, ct appcontext.AppContext) error { diff --git a/src/rsync/cmd/main.go b/src/rsync/cmd/main.go index f46fa79b..95c36e20 100644 --- a/src/rsync/cmd/main.go +++ b/src/rsync/cmd/main.go @@ -81,7 +81,6 @@ func main() { // Initialize the mongodb err := db.InitializeDatabaseConnection("mco") if err != nil { - fmt.Println(" Exiting mongod ") log.Println("Unable to initialize database connection...") log.Println(err) log.Fatalln("Exiting...") @@ -90,14 +89,13 @@ func main() { // Initialize contextdb err = contextDb.InitializeContextDatabase() if err != nil { - fmt.Println(" Exiting etcd") log.Println("Unable to initialize database connection...") log.Println(err) log.Fatalln("Exiting...") } // Start grpc - fmt.Println("starting rsync GRPC server..") + log.Println("starting rsync GRPC server..") err = startGrpcServer() if err != nil { log.Fatalf("GRPC server failed to start") diff --git a/src/rsync/go.mod b/src/rsync/go.mod index 9c3362ce..a2c5f83b 100644 --- a/src/rsync/go.mod +++ b/src/rsync/go.mod @@ -3,13 +3,15 @@ module github.com/onap/multicloud-k8s/src/rsync go 1.13 require ( - github.com/golang/protobuf v1.3.4 + github.com/golang/protobuf v1.4.1 + github.com/googleapis/gnostic v0.4.0 + github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.9.5 // indirect + github.com/mattn/go-isatty v0.0.4 // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect github.com/onap/multicloud-k8s/src/orchestrator v0.0.0-20200601021239-7959bd4c6fd4 - golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 // indirect - google.golang.org/appengine v1.4.0 // indirect + go.etcd.io/bbolt v1.3.3 // indirect google.golang.org/grpc v1.27.1 - honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc // indirect - github.com/googleapis/gnostic v0.4.0 k8s.io/kubernetes v1.14.1 ) diff --git a/src/rsync/go.sum b/src/rsync/go.sum index 270417c2..c49c5b07 100644 --- 
a/src/rsync/go.sum +++ b/src/rsync/go.sum @@ -742,6 +742,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2El google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200305110556-506484158171 h1:xes2Q2k+d/+YNXVw0FpZkIDJiaux4OVrRKXRAzH6A0U= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -765,6 +767,9 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/src/rsync/pkg/app/client.go 
b/src/rsync/pkg/app/client.go new file mode 100644 index 00000000..fb57d46b --- /dev/null +++ b/src/rsync/pkg/app/client.go @@ -0,0 +1,154 @@ +/* +Copyright 2018 Intel Corporation. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +import ( + "os" + "strings" + "time" + "encoding/base64" + + pkgerrors "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/client-go/discovery/cached/disk" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" + + "github.com/onap/multicloud-k8s/src/clm/pkg/cluster" +) + +const basePath string = "/tmp/rsync/" + +// KubernetesClient encapsulates the different clients' interfaces +// we need when interacting with a Kubernetes cluster +type KubernetesClient struct { + clientSet kubernetes.Interface + dynamicClient dynamic.Interface + discoverClient *disk.CachedDiscoveryClient + restMapper meta.RESTMapper + instanceID string +} + +// getKubeConfig uses the connectivity client to get the kubeconfig based on the name +// of the clustername. This is written out to a file. 
+func (k *KubernetesClient) getKubeConfig(clustername string, id string) (string, error) { + + if !strings.Contains(clustername, "+") { + return "", pkgerrors.New("Not a valid cluster name") + } + strs := strings.Split(clustername, "+") + if len(strs) != 2 { + return "", pkgerrors.New("Not a valid cluster name") + } + kubeConfig, err := cluster.NewClusterClient().GetClusterContent(strs[0], strs[1]) + if err != nil { + return "", pkgerrors.New("Get kubeconfig failed") + } + + var kubeConfigPath string = basePath + id + "/" + clustername + "/" + + if _, err := os.Stat(kubeConfigPath); os.IsNotExist(err) { + err = os.MkdirAll(kubeConfigPath, 0755) + if err != nil { + return "", err + } + } + kubeConfigPath = kubeConfigPath + "config" + + f, err := os.Create(kubeConfigPath) + defer f.Close() + if err != nil { + return "", err + } + dec, err := base64.StdEncoding.DecodeString(kubeConfig.Kubeconfig) + if err != nil { + return "", err + } + _, err = f.Write(dec) + if err != nil { + return "", err + } + + return kubeConfigPath, nil +} + +// init loads the Kubernetes configuation values stored into the local configuration file +func (k *KubernetesClient) Init(clustername string, iid string) error { + if clustername == "" { + return pkgerrors.New("Cloudregion is empty") + } + + if iid == "" { + return pkgerrors.New("Instance ID is empty") + } + + k.instanceID = iid + + configPath, err := k.getKubeConfig(clustername, iid) + if err != nil { + return pkgerrors.Wrap(err, "Get kubeconfig file") + } + + //Remove kubeconfigfile after the clients are created + defer os.Remove(configPath) + + config, err := clientcmd.BuildConfigFromFlags("", configPath) + if err != nil { + return pkgerrors.Wrap(err, "setConfig: Build config from flags raised an error") + } + + k.clientSet, err = kubernetes.NewForConfig(config) + if err != nil { + return err + } + + k.dynamicClient, err = dynamic.NewForConfig(config) + if err != nil { + return pkgerrors.Wrap(err, "Creating dynamic client") + } + + 
k.discoverClient, err = disk.NewCachedDiscoveryClientForConfig(config, os.TempDir(), "", 10*time.Minute) + if err != nil { + return pkgerrors.Wrap(err, "Creating discovery client") + } + + k.restMapper = restmapper.NewDeferredDiscoveryRESTMapper(k.discoverClient) + + return nil +} + +//GetMapper returns the RESTMapper that was created for this client +func (k *KubernetesClient) GetMapper() meta.RESTMapper { + return k.restMapper +} + +//GetDynamicClient returns the dynamic client that is needed for +//unstructured REST calls to the apiserver +func (k *KubernetesClient) GetDynamicClient() dynamic.Interface { + return k.dynamicClient +} + +// GetStandardClient returns the standard client that can be used to handle +// standard kubernetes kinds +func (k *KubernetesClient) GetStandardClient() kubernetes.Interface { + return k.clientSet +} + +//GetInstanceID returns the instanceID that is injected into all the +//resources created by the plugin +func (k *KubernetesClient) GetInstanceID() string { + return k.instanceID +} diff --git a/src/rsync/pkg/connector/connector.go b/src/rsync/pkg/connector/connector.go new file mode 100644 index 00000000..6e17f87a --- /dev/null +++ b/src/rsync/pkg/connector/connector.go @@ -0,0 +1,96 @@ +/* + * Copyright 2019 Intel Corporation, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package connector + +import ( + "log" + + "github.com/onap/multicloud-k8s/src/rsync/pkg/internal/config" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" +) + +// KubernetesConnector is an interface that is expected to be implemented +// by any code that calls the plugin framework functions. +// It implements methods that are needed by the plugins to get Kubernetes +// clients and other information needed to interface with Kubernetes +type KubernetesConnector interface { + //GetMapper returns the RESTMapper that was created for this client + GetMapper() meta.RESTMapper + + //GetDynamicClient returns the dynamic client that is needed for + //unstructured REST calls to the apiserver + GetDynamicClient() dynamic.Interface + + // GetStandardClient returns the standard client that can be used to handle + // standard kubernetes kinds + GetStandardClient() kubernetes.Interface + + //GetInstanceID returns the InstanceID for tracking during creation + GetInstanceID() string +} + +// Reference is the interface that is implemented +type Reference interface { + //Create a kubernetes resource described by the yaml in yamlFilePath + Create(yamlFilePath string, namespace string, client KubernetesConnector) (string, error) + //Delete a kubernetes resource described in the provided namespace + Delete(yamlFilePath string, resname string, namespace string, client KubernetesConnector) error +} + +// TagPodsIfPresent finds the PodTemplateSpec from any workload +// object that contains it and changes the spec to include the tag label +func TagPodsIfPresent(unstruct *unstructured.Unstructured, tag string) { + + spec, ok := unstruct.Object["spec"].(map[string]interface{}) + if !ok { + log.Println("Error converting spec to map") + return + } + + template, ok := spec["template"].(map[string]interface{}) + if !ok { 
+ log.Println("Error converting template to map") + return + } + + //Attempt to convert the template to a podtemplatespec. + //This is to check if we have any pods being created. + podTemplateSpec := &corev1.PodTemplateSpec{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(template, podTemplateSpec) + if err != nil { + log.Println("Did not find a podTemplateSpec: " + err.Error()) + return + } + + labels := podTemplateSpec.GetLabels() + if labels == nil { + labels = map[string]string{} + } + labels[config.GetConfiguration().KubernetesLabelName] = tag + podTemplateSpec.SetLabels(labels) + + updatedTemplate, err := runtime.DefaultUnstructuredConverter.ToUnstructured(podTemplateSpec) + + //Set the label + spec["template"] = updatedTemplate +} diff --git a/src/rsync/pkg/context/context.go b/src/rsync/pkg/context/context.go new file mode 100644 index 00000000..67e589c9 --- /dev/null +++ b/src/rsync/pkg/context/context.go @@ -0,0 +1,547 @@ +/* + * Copyright 2020 Intel Corporation, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package context + +import ( + "encoding/json" + "fmt" + "log" + "os" + "sync" + "strings" + "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext" + pkgerrors "github.com/pkg/errors" + res "github.com/onap/multicloud-k8s/src/rsync/pkg/resource" + con "github.com/onap/multicloud-k8s/src/rsync/pkg/connector" + "github.com/onap/multicloud-k8s/src/rsync/pkg/app" +) + +type CompositeAppContext struct { + cid interface{} + appsorder string + appsdependency string + appsmap []instMap +} +type clusterInfo struct { + name string + resorder string + resdependency string + ressmap []instMap +} +type instMap struct { + name string + depinfo string + status string + rerr error + clusters []clusterInfo +} + +const basePath string = "/tmp/rsync/" + +func getInstMap(order string, dependency string, level string) ([]instMap, error) { + + if order == "" { + return nil, pkgerrors.Errorf("Not a valid order value") + } + if dependency == "" { + return nil, pkgerrors.Errorf("Not a valid dependency value") + } + + if !(level == "app" || level == "res") { + return nil, pkgerrors.Errorf("Not a valid level name given to create map") + } + + + var aov map[string]interface{} + json.Unmarshal([]byte(order), &aov) + + s := fmt.Sprintf("%vorder", level) + appso := aov[s].([]interface{}) + var instmap = make([]instMap, len(appso)) + + var adv map[string]interface{} + json.Unmarshal([]byte(dependency), &adv) + s = fmt.Sprintf("%vdependency", level) + appsd := adv[s].(map[string]interface{}) + for i, u := range appso { + instmap[i] = instMap{u.(string), appsd[u.(string)].(string), "none", nil, nil} + } + + return instmap, nil +} + +func deleteResource(clustername string, resname string, respath string) error { + k8sClient := app.KubernetesClient{} + err := k8sClient.Init(clustername, resname) + if err != nil { + log.Println("Init failed: " + err.Error()) + return err + } + + var c con.KubernetesConnector + c = &k8sClient + var gp res.Resource + err = gp.Delete(respath, resname, 
"default", c) + if err != nil { + log.Println("Delete resource failed: " + err.Error()) + return err + } + log.Println("Resource succesfully deleted") + return nil + +} + +func createResource(clustername string, resname string, respath string) error { + k8sClient := app.KubernetesClient{} + err := k8sClient.Init(clustername, resname) + if err != nil { + log.Println("Client init failed: " + err.Error()) + return err + } + + var c con.KubernetesConnector + c = &k8sClient + var gp res.Resource + _, err = gp.Create(respath,"default", c) + if err != nil { + log.Println("Create failed: " + err.Error()) + return err + } + log.Println("Resource succesfully created") + return nil + +} + +func terminateResource(ac appcontext.AppContext, resmap instMap, appname string, clustername string) error { + var resPath string = basePath + appname + "/" + clustername + "/resources/" + rh, err := ac.GetResourceHandle(appname, clustername, resmap.name) + if err != nil { + return err + } + + resval, err := ac.GetValue(rh) + if err != nil { + return err + } + + if resval != "" { + if _, err := os.Stat(resPath); os.IsNotExist(err) { + err = os.MkdirAll(resPath, 0755) + if err != nil { + return err + } + } + resPath := resPath + resmap.name + ".yaml" + f, err := os.Create(resPath) + defer f.Close() + if err != nil { + return err + } + _, err = f.WriteString(resval.(string)) + if err != nil { + return err + } + result := strings.Split(resmap.name, "+") + if result[0] == "" { + return pkgerrors.Errorf("Resource name is nil") + } + err = deleteResource(clustername, result[0], resPath) + if err != nil { + return err + } + //defer os.Remove(resPath) + } else { + return pkgerrors.Errorf("Resource value is nil") + } + + return nil + +} + +func instantiateResource(ac appcontext.AppContext, resmap instMap, appname string, clustername string) error { + var resPath string = basePath + appname + "/" + clustername + "/resources/" + rh, err := ac.GetResourceHandle(appname, clustername, resmap.name) + if 
err != nil { + return err + } + + resval, err := ac.GetValue(rh) + if err != nil { + return err + } + + if resval != "" { + if _, err := os.Stat(resPath); os.IsNotExist(err) { + err = os.MkdirAll(resPath, 0755) + if err != nil { + return err + } + } + resPath := resPath + resmap.name + ".yaml" + f, err := os.Create(resPath) + defer f.Close() + if err != nil { + return err + } + _, err = f.WriteString(resval.(string)) + if err != nil { + return err + } + result := strings.Split(resmap.name, "+") + if result[0] == "" { + return pkgerrors.Errorf("Resource name is nil") + } + err = createResource(clustername, result[0], resPath) + if err != nil { + return err + } + //defer os.Remove(resPath) + } else { + return pkgerrors.Errorf("Resource value is nil") + } + + return nil + +} + +func terminateResources(ac appcontext.AppContext, ressmap []instMap, appname string, clustername string) error { + var wg sync.WaitGroup + var chans = make([]chan int, len(ressmap)) + for l := range chans { + chans[l] = make(chan int) + } + for i:=0; i<len(ressmap); i++ { + wg.Add(1) + go func(index int) { + if ressmap[index].depinfo == "go" { + ressmap[index].status = "start" + } else { + ressmap[index].status = "waiting" + c := <- chans[index] + if c != index { + ressmap[index].status = "error" + ressmap[index].rerr = pkgerrors.Errorf("channel does not match") + wg.Done() + return + } + ressmap[index].status = "start" + } + ressmap[index].rerr = terminateResource(ac, ressmap[index], appname, clustername) + ressmap[index].status = "done" + waitstr := fmt.Sprintf("wait on %v",ressmap[index].name) + for j:=0; j<len(ressmap); j++ { + if ressmap[j].depinfo == waitstr { + chans[j] <- j + } + } + wg.Done() + }(i) + } + wg.Wait() + for k:=0; k<len(ressmap); k++ { + if ressmap[k].rerr != nil { + return pkgerrors.Errorf("error during resources termination") + } + } + return nil + +} + +func instantiateResources(ac appcontext.AppContext, ressmap []instMap, appname string, clustername string) error { + 
var wg sync.WaitGroup + var chans = make([]chan int, len(ressmap)) + for l := range chans { + chans[l] = make(chan int) + } + for i:=0; i<len(ressmap); i++ { + wg.Add(1) + go func(index int) { + if ressmap[index].depinfo == "go" { + ressmap[index].status = "start" + } else { + ressmap[index].status = "waiting" + c := <- chans[index] + if c != index { + ressmap[index].status = "error" + ressmap[index].rerr = pkgerrors.Errorf("channel does not match") + wg.Done() + return + } + ressmap[index].status = "start" + } + ressmap[index].rerr = instantiateResource(ac, ressmap[index], appname, clustername) + ressmap[index].status = "done" + waitstr := fmt.Sprintf("wait on %v",ressmap[index].name) + for j:=0; j<len(ressmap); j++ { + if ressmap[j].depinfo == waitstr { + chans[j] <- j + } + } + wg.Done() + }(i) + } + wg.Wait() + for k:=0; k<len(ressmap); k++ { + if ressmap[k].rerr != nil { + return pkgerrors.Errorf("error during resources instantiation") + } + } + return nil + +} + +func terminateApp(ac appcontext.AppContext, appmap instMap) error { + + for i:=0; i<len(appmap.clusters); i++ { + err := terminateResources(ac, appmap.clusters[i].ressmap, appmap.name, + appmap.clusters[i].name) + if err != nil { + return err + } + } + log.Println("Termination of app done: " + appmap.name) + + return nil + +} + + +func instantiateApp(ac appcontext.AppContext, appmap instMap) error { + + for i:=0; i<len(appmap.clusters); i++ { + err := instantiateResources(ac, appmap.clusters[i].ressmap, appmap.name, + appmap.clusters[i].name) + if err != nil { + return err + } + } + log.Println("Instantiation of app done: " + appmap.name) + return nil + +} + +func instantiateApps(ac appcontext.AppContext, appsmap []instMap) error { + var wg sync.WaitGroup + var chans = make([]chan int, len(appsmap)) + for l := range chans { + chans[l] = make(chan int) + } + for i:=0; i<len(appsmap); i++ { + wg.Add(1) + go func(index int) { + if appsmap[index].depinfo == "go" { + appsmap[index].status = "start" + } 
else { + appsmap[index].status = "waiting" + c := <- chans[index] + if c != index { + appsmap[index].status = "error" + appsmap[index].rerr = pkgerrors.Errorf("channel does not match") + wg.Done() + return + } + appsmap[index].status = "start" + } + appsmap[index].rerr = instantiateApp(ac, appsmap[index]) + appsmap[index].status = "done" + waitstr := fmt.Sprintf("wait on %v",appsmap[index].name) + for j:=0; j<len(appsmap); j++ { + if appsmap[j].depinfo == waitstr { + chans[j] <- j + } + } + wg.Done() + }(i) + } + wg.Wait() + for k:=0; k<len(appsmap); k++ { + if appsmap[k].rerr != nil { + return pkgerrors.Errorf("error during apps instantiation") + } + } + return nil + +} + +func (instca *CompositeAppContext) InstantiateComApp(cid interface{}) error { + ac := appcontext.AppContext{} + + _, err := ac.LoadAppContext(cid) + if err != nil { + return err + } + instca.cid = cid + + appsorder, err := ac.GetAppInstruction("order") + if err != nil { + return err + } + instca.appsorder = appsorder.(string) + appsdependency, err := ac.GetAppInstruction("dependency") + if err != nil { + return err + } + instca.appsdependency = appsdependency.(string) + instca.appsmap, err = getInstMap(instca.appsorder,instca.appsdependency, "app") + if err != nil { + return err + } + + for j:=0; j<len(instca.appsmap); j++ { + clusternames, err := ac.GetClusterNames(instca.appsmap[j].name) + if err != nil { + return err + } + instca.appsmap[j].clusters = make([]clusterInfo, len(clusternames)) + for k:=0; k<len(clusternames); k++ { + instca.appsmap[j].clusters[k].name = clusternames[k] + resorder, err := ac.GetResourceInstruction( + instca.appsmap[j].name, clusternames[k], "order") + if err != nil { + return err + } + instca.appsmap[j].clusters[k].resorder = resorder.(string) + + resdependency, err := ac.GetResourceInstruction( + instca.appsmap[j].name, clusternames[k], "dependency") + if err != nil { + return err + } + instca.appsmap[j].clusters[k].resdependency = resdependency.(string) + + 
instca.appsmap[j].clusters[k].ressmap, err = getInstMap( + instca.appsmap[j].clusters[k].resorder, + instca.appsmap[j].clusters[k].resdependency, "res") + if err != nil { + return err + } + } + } + err = instantiateApps(ac, instca.appsmap) + if err != nil { + return err + } + + return nil +} + +// Delete all the apps +func terminateApps(ac appcontext.AppContext, appsmap []instMap) error { + var wg sync.WaitGroup + var chans = make([]chan int, len(appsmap)) + for l := range chans { + chans[l] = make(chan int) + } + for i:=0; i<len(appsmap); i++ { + wg.Add(1) + go func(index int) { + if appsmap[index].depinfo == "go" { + appsmap[index].status = "start" + } else { + appsmap[index].status = "waiting" + c := <- chans[index] + if c != index { + appsmap[index].status = "error" + appsmap[index].rerr = pkgerrors.Errorf("channel does not match") + wg.Done() + return + } + appsmap[index].status = "start" + } + appsmap[index].rerr = terminateApp(ac, appsmap[index]) + appsmap[index].status = "done" + waitstr := fmt.Sprintf("wait on %v",appsmap[index].name) + for j:=0; j<len(appsmap); j++ { + if appsmap[j].depinfo == waitstr { + chans[j] <- j + } + } + wg.Done() + }(i) + } + wg.Wait() + for k:=0; k<len(appsmap); k++ { + if appsmap[k].rerr != nil { + return pkgerrors.Errorf("error during apps instantiation") + } + } + return nil + +} +// Delete all the resources for a given context +func (instca *CompositeAppContext) TerminateComApp(cid interface{}) error { + ac := appcontext.AppContext{} + + _, err := ac.LoadAppContext(cid) + if err != nil { + return err + } + instca.cid = cid + + appsorder, err := ac.GetAppInstruction("order") + if err != nil { + return err + } + instca.appsorder = appsorder.(string) + appsdependency, err := ac.GetAppInstruction("dependency") + if err != nil { + return err + } + instca.appsdependency = appsdependency.(string) + instca.appsmap, err = getInstMap(instca.appsorder,instca.appsdependency, "app") + if err != nil { + return err + } + + for j:=0; 
j<len(instca.appsmap); j++ { + clusternames, err := ac.GetClusterNames(instca.appsmap[j].name) + if err != nil { + return err + } + instca.appsmap[j].clusters = make([]clusterInfo, len(clusternames)) + for k:=0; k<len(clusternames); k++ { + instca.appsmap[j].clusters[k].name = clusternames[k] + resorder, err := ac.GetResourceInstruction( + instca.appsmap[j].name, clusternames[k], "order") + if err != nil { + return err + } + instca.appsmap[j].clusters[k].resorder = resorder.(string) + + resdependency, err := ac.GetResourceInstruction( + instca.appsmap[j].name, clusternames[k], "dependency") + if err != nil { + return err + } + instca.appsmap[j].clusters[k].resdependency = resdependency.(string) + + instca.appsmap[j].clusters[k].ressmap, err = getInstMap( + instca.appsmap[j].clusters[k].resorder, + instca.appsmap[j].clusters[k].resdependency, "res") + if err != nil { + return err + } + } + } + err = terminateApps(ac, instca.appsmap) + if err != nil { + return err + } + + return nil + +} diff --git a/src/rsync/pkg/grpc/installappserver/installappserver.go b/src/rsync/pkg/grpc/installappserver/installappserver.go index 28b4a585..68118ade 100644 --- a/src/rsync/pkg/grpc/installappserver/installappserver.go +++ b/src/rsync/pkg/grpc/installappserver/installappserver.go @@ -17,10 +17,8 @@ import ( "context" "encoding/json" "log" - "github.com/onap/multicloud-k8s/src/rsync/pkg/grpc/installapp" - //"google.golang.org/grpc/codes" - //"google.golang.org/grpc/status" + con "github.com/onap/multicloud-k8s/src/rsync/pkg/context" ) type installappServer struct { @@ -31,10 +29,17 @@ func (cs *installappServer) InstallApp(ctx context.Context, req *installapp.Inst installAppReq, _ := json.Marshal(req) log.Println("GRPC Server received installAppRequest: ", string(installAppReq)) - // Insert call to Server Functionality here - // - // - + // Try instantiate the comp app + instca := con.CompositeAppContext{} + err := instca.InstantiateComApp(req.GetAppContext()) + if err != nil { + 
log.Println("Instantiation failed: " + err.Error()) + err := instca.TerminateComApp(req.GetAppContext()) + if err != nil { + log.Println("Termination failed: " + err.Error()) + } + return &installapp.InstallAppResponse{AppContextInstalled: false}, err + } return &installapp.InstallAppResponse{AppContextInstalled: true}, nil } @@ -43,8 +48,12 @@ func (cs *installappServer) UninstallApp(ctx context.Context, req *installapp.Un log.Println("GRPC Server received uninstallAppRequest: ", string(uninstallAppReq)) // Try terminating the comp app here - // - // + instca := con.CompositeAppContext{} + err := instca.TerminateComApp(req.GetAppContext()) + if err != nil { + log.Println("Termination failed: " + err.Error()) + return &installapp.UninstallAppResponse{AppContextUninstalled: false}, err + } return &installapp.UninstallAppResponse{AppContextUninstalled: true}, nil } diff --git a/src/rsync/pkg/internal/config/config.go b/src/rsync/pkg/internal/config/config.go new file mode 100644 index 00000000..89f2553d --- /dev/null +++ b/src/rsync/pkg/internal/config/config.go @@ -0,0 +1,128 @@ +/* + * Copyright 2019 Intel Corporation, Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
// Configuration loads up all the values that are used to configure
// backend implementations
type Configuration struct {
	CAFile              string `json:"ca-file"`
	ServerCert          string `json:"server-cert"`
	ServerKey           string `json:"server-key"`
	Password            string `json:"password"`
	DatabaseAddress     string `json:"database-address"`
	DatabaseType        string `json:"database-type"`
	PluginDir           string `json:"plugin-dir"`
	EtcdIP              string `json:"etcd-ip"`
	EtcdCert            string `json:"etcd-cert"`
	EtcdKey             string `json:"etcd-key"`
	EtcdCAFile          string `json:"etcd-ca-file"`
	ServicePort         string `json:"service-port"`
	KubernetesLabelName string `json:"kubernetes-label-name"`
}

// gConfig is the package-level cache of the loaded configuration.
// It is populated lazily by GetConfiguration.
var gConfig *Configuration

// readConfigFile reads the specified config file to setup some env variables.
// On any error it returns a usable configuration (the defaults, or the
// partially-decoded file contents) alongside the error.
func readConfigFile(file string) (*Configuration, error) {
	f, err := os.Open(file)
	if err != nil {
		return defaultConfiguration(), err
	}
	defer f.Close()

	// Setup some defaults here
	// If the json file has values in it, the defaults will be overwritten
	conf := defaultConfiguration()

	// Read the configuration from json file
	decoder := json.NewDecoder(f)
	err = decoder.Decode(conf)
	if err != nil {
		return conf, err
	}

	return conf, nil
}

// defaultConfiguration returns the built-in defaults. The plugin directory
// defaults to the current working directory (or "." if it cannot be read).
func defaultConfiguration() *Configuration {
	cwd, err := os.Getwd()
	if err != nil {
		log.Println("Error getting cwd. Using .")
		cwd = "."
	}

	return &Configuration{
		CAFile:              "ca.cert",
		ServerCert:          "server.cert",
		ServerKey:           "server.key",
		Password:            "",
		DatabaseAddress:     "127.0.0.1",
		DatabaseType:        "mongo",
		PluginDir:           cwd,
		EtcdIP:              "127.0.0.1",
		EtcdCert:            "",
		EtcdKey:             "",
		EtcdCAFile:          "",
		ServicePort:         "9015",
		KubernetesLabelName: "k8splugin.io/rb-instance-id",
	}
}

// GetConfiguration returns the configuration for the app.
// It will try to load it if it is not already loaded.
// NOTE(review): this lazy init is not goroutine-safe — confirm callers
// only hit it from a single goroutine at startup.
func GetConfiguration() *Configuration {
	if gConfig == nil {
		conf, err := readConfigFile("k8sconfig.json")
		if err != nil {
			log.Println("Error loading config file. Using defaults.")
		}
		gConfig = conf
	}

	return gConfig
}

// SetConfigValue overrides a single string field of the configuration,
// addressed by its Go field name (e.g. "EtcdIP"). An empty key or value
// is a no-op. This is mostly used to customize the application and
// should be used carefully.
func SetConfigValue(key string, value string) *Configuration {
	c := GetConfiguration()
	if value == "" || key == "" {
		return c
	}

	// Only settable string fields of the Configuration struct may change.
	f := reflect.ValueOf(c).Elem().FieldByName(key)
	if f.IsValid() && f.CanSet() && f.Kind() == reflect.String {
		f.SetString(value)
	}
	return c
}
+*/ + +package utils + +import ( + "io/ioutil" + "os" + "path" + + pkgerrors "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" +) + +// DecodeYAML reads a YAMl file to extract the Kubernetes object definition +func DecodeYAML(path string, into runtime.Object) (runtime.Object, error) { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return nil, pkgerrors.New("File " + path + " not found") + } else { + return nil, pkgerrors.Wrap(err, "Stat file error") + } + } + + rawBytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, pkgerrors.Wrap(err, "Read YAML file error") + } + + decode := scheme.Codecs.UniversalDeserializer().Decode + obj, _, err := decode(rawBytes, nil, into) + if err != nil { + return nil, pkgerrors.Wrap(err, "Deserialize YAML error") + } + + return obj, nil +} + +//EnsureDirectory makes sure that the directories specified in the path exist +//If not, it will create them, if possible. +func EnsureDirectory(f string) error { + base := path.Dir(f) + _, err := os.Stat(base) + if err != nil && !os.IsNotExist(err) { + return err + } + return os.MkdirAll(base, 0755) +} diff --git a/src/rsync/pkg/resource/resource.go b/src/rsync/pkg/resource/resource.go new file mode 100644 index 00000000..9d715697 --- /dev/null +++ b/src/rsync/pkg/resource/resource.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 Intel Corporation. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + pkgerrors "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + + utils "github.com/onap/multicloud-k8s/src/rsync/pkg/internal" + "github.com/onap/multicloud-k8s/src/rsync/pkg/internal/config" + "github.com/onap/multicloud-k8s/src/rsync/pkg/connector" +) + +type Resource struct { +} + +// Create deployment object in a specific Kubernetes cluster +func (r Resource) Create(yamlFilePath string, namespace string, client connector.KubernetesConnector) (string, error) { + if namespace == "" { + namespace = "default" + } + + //Decode the yaml file to create a runtime.Object + unstruct := &unstructured.Unstructured{} + //Ignore the returned obj as we expect the data in unstruct + _, err := utils.DecodeYAML(yamlFilePath, unstruct) + if err != nil { + return "", pkgerrors.Wrap(err, "Decode deployment object error") + } + + dynClient := client.GetDynamicClient() + mapper := client.GetMapper() + + gvk := unstruct.GroupVersionKind() + mapping, err := mapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) + if err != nil { + return "", pkgerrors.Wrap(err, "Mapping kind to resource error") + } + + //Add the tracking label to all resources created here + labels := unstruct.GetLabels() + //Check if labels exist for this object + if labels == nil { + labels = map[string]string{} + } + labels[config.GetConfiguration().KubernetesLabelName] = client.GetInstanceID() + unstruct.SetLabels(labels) + + // This checks if the resource we are creating has a podSpec in it + // Eg: Deployment, StatefulSet, Job etc.. + // If a PodSpec is found, the label will be added to it too. 
+ connector.TagPodsIfPresent(unstruct, client.GetInstanceID()) + + gvr := mapping.Resource + var createdObj *unstructured.Unstructured + + switch mapping.Scope.Name() { + case meta.RESTScopeNameNamespace: + createdObj, err = dynClient.Resource(gvr).Namespace(namespace).Create(unstruct, metav1.CreateOptions{}) + case meta.RESTScopeNameRoot: + createdObj, err = dynClient.Resource(gvr).Create(unstruct, metav1.CreateOptions{}) + default: + return "", pkgerrors.New("Got an unknown RESTSCopeName for mapping: " + gvk.String()) + } + + if err != nil { + return "", pkgerrors.Wrap(err, "Create object error") + } + + return createdObj.GetName(), nil +} + +// Delete an existing resource hosted in a specific Kubernetes cluster +func (r Resource) Delete(yamlFilePath string, resname string, namespace string, client connector.KubernetesConnector) error { + if namespace == "" { + namespace = "default" + } + + //Decode the yaml file to create a runtime.Object + unstruct := &unstructured.Unstructured{} + //Ignore the returned obj as we expect the data in unstruct + _, err := utils.DecodeYAML(yamlFilePath, unstruct) + if err != nil { + return pkgerrors.Wrap(err, "Decode deployment object error") + } + + dynClient := client.GetDynamicClient() + mapper := client.GetMapper() + + gvk := unstruct.GroupVersionKind() + mapping, err := mapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) + if err != nil { + return pkgerrors.Wrap(err, "Mapping kind to resource error") + } + + gvr := mapping.Resource + deletePolicy := metav1.DeletePropagationForeground + opts := &metav1.DeleteOptions{ + PropagationPolicy: &deletePolicy, + } + + switch mapping.Scope.Name() { + case meta.RESTScopeNameNamespace: + err = dynClient.Resource(gvr).Namespace(namespace).Delete(resname, opts) + case meta.RESTScopeNameRoot: + err = dynClient.Resource(gvr).Delete(resname, opts) + default: + return pkgerrors.New("Got an unknown RESTSCopeName for mappin") + } + + if err != nil { + return 
pkgerrors.Wrap(err, "Delete object error") + } + return nil +} |