author     Rajamohan Raj <rajamohan.raj@intel.com>  2020-08-04 00:18:16 +0000
committer  Ritu Sood <Ritu.Sood@intel.com>  2020-08-21 00:12:44 +0000
commit     e3e6c103f6e1cd1431ddacd23d500f39fbd1bcee (patch)
tree       25ab66ea5bca905d3251f830a8b7183b1ae8ba1e /kud/tests
parent     38ed3fbf85636ad5f73538c2de07cce72499f5bf (diff)
Updating m3db & m3db operator charts
This patch updates all the charts related to m3db and adds scripts for deploying the m3db stack through the orchestrator.

Issue-ID: MULTICLOUD-1112
Signed-off-by: Rajamohan Raj <rajamohan.raj@intel.com>
Change-Id: I42677809709fc4d12f16a156e563d6618a8f8437
Diffstat (limited to 'kud/tests')
-rw-r--r--  kud/tests/_common.sh  20
-rwxr-xr-x  kud/tests/cluster1-m3db-installer.sh  382
-rwxr-xr-x  kud/tests/cluster1-m3db-operator-test.sh (renamed from kud/tests/m3db-operator-test.sh)  22
-rwxr-xr-x  kud/tests/cluster2-m3db-installer.sh  387
-rwxr-xr-x  kud/tests/cluster2-m3db-operator-test.sh  386
-rwxr-xr-x  kud/tests/prometheus-test.sh  93
-rwxr-xr-x  kud/tests/sanity-check-for-v2.sh  5
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml  4
-rwxr-xr-x  kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml  2
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/diff.txt  145
-rwxr-xr-x  kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml  33
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app3/helm/m3db/.helmignore  22
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/NOTES.txt  1
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/_helpers.tpl  32
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/configmap.yaml  218
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/etcd-cluster.yaml  20
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app3/helm/m3db/values.yaml  52
-rw-r--r--  kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/Chart.yaml (renamed from kud/tests/vnfs/comp-app/collection/app3/helm/m3db/Chart.yaml)  0
-rw-r--r--  kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/del.yaml  49
-rw-r--r--  kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/templates/m3dbcluster.yaml (renamed from kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/m3dbcluster.yaml)  19
-rw-r--r--  kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/values.yaml  29
-rw-r--r--  kud/tests/vnfs/comp-app/collection/m3db/profile/manifest.yaml (renamed from kud/tests/vnfs/comp-app/collection/app3/profile/manifest.yaml)  0
-rw-r--r--  kud/tests/vnfs/comp-app/collection/m3db/profile/override_values.yaml (renamed from kud/tests/vnfs/comp-app/collection/app3/profile/override_values.yaml)  0
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/Chart.yaml  16
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/LICENSE (renamed from kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/LICENSE)  0
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/NOTES.txt (renamed from kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/NOTES.txt)  0
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/README.md (renamed from kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/README.md)  0
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/cluster_role.yaml (renamed from kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/cluster_role.yaml)  2
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/cluster_role_binding.yaml (renamed from kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/cluster_role_binding.yaml)  0
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/etcd-cluster/etcd-basic.yaml  86
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/service_account.yaml (renamed from kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/service_account.yaml)  0
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/stateful_set.yaml (renamed from kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/stateful_set.yaml)  4
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/values.yaml (renamed from kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/values.yaml)  2
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/profile/manifest.yaml (renamed from kud/tests/vnfs/comp-app/collection/operators/profile/manifest.yaml)  0
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators-latest/profile/override_values.yaml (renamed from kud/tests/vnfs/comp-app/collection/operators/profile/override_values.yaml)  0
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/.helmignore  22
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/Chart.yaml  5
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/.helmignore  21
-rwxr-xr-x  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/Chart.yaml  14
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/OWNERS  6
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/README.md  169
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/NOTES.txt  27
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/_helpers.tpl  53
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/backup-etcd-crd.yaml  18
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/backup-operator-deployment.yaml  60
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/etcd-cluster-crd.yaml  51
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-cluster-role.yaml  49
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-clusterrole-binding.yaml  20
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-deployment.yaml  83
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-service-account.yaml  13
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-etcd-crd.yaml  28
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-operator-deployment.yaml  64
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-operator-service.yaml  20
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/values.yaml  162
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/Chart.yaml  22
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/requirements.yaml  5
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/resources/m3db.labels  7
-rw-r--r--  kud/tests/vnfs/comp-app/collection/operators/helm/operator/values.yaml  38
58 files changed, 1416 insertions, 1572 deletions
diff --git a/kud/tests/_common.sh b/kud/tests/_common.sh
index 35023708..b56972c8 100644
--- a/kud/tests/_common.sh
+++ b/kud/tests/_common.sh
@@ -1186,17 +1186,14 @@ function populate_CSAR_eaa_sample_app_rbdefinition {
function populate_CSAR_composite_app_helm {
_checks_args "$1"
pushd "${CSAR_DIR}/$1"
- print_msg "Create Helm Chart Archives for compositeApp"
+ print_msg "Create Helm Chart Archives for compositeAppi with collectd and prometheus-grafana helm charts"
rm -f *.tar.gz
tar -czf collectd.tar.gz -C $test_folder/vnfs/comp-app/collection/app1/helm .
tar -czf prometheus-operator.tar.gz -C $test_folder/vnfs/comp-app/collection/app2/helm .
- tar -czf m3db.tar.gz -C $test_folder/vnfs/comp-app/collection/app3/helm .
tar -czf collectd_profile.tar.gz -C $test_folder/vnfs/comp-app/collection/app1/profile .
tar -czf prometheus-operator_profile.tar.gz -C $test_folder/vnfs/comp-app/collection/app2/profile .
- tar -czf m3db_profile.tar.gz -C $test_folder/vnfs/comp-app/collection/app3/profile .
export prometheus_helm_path="${CSAR_DIR}/$1/prometheus-operator.tar.gz"
export collectd_helm_path="${CSAR_DIR}/$1/collectd.tar.gz"
- export m3db_helm_path="${CSAR_DIR}/$1/m3db.tar.gz"
popd
}
@@ -1206,9 +1203,24 @@ function populate_CSAR_operator_helm {
pushd "${CSAR_DIR}/$1"
print_msg "Create Helm Chart Archives for operators"
rm -f *.tar.gz
+ #tar -czf operator.tar.gz -C $test_folder/vnfs/comp-app/collection/operators-latest/helm .
+ #tar -czf operator_profile.tar.gz -C $test_folder/vnfs/comp-app/collection/operators-latest/profile .
tar -czf operator.tar.gz -C $test_folder/vnfs/comp-app/collection/operators/helm .
tar -czf operator_profile.tar.gz -C $test_folder/vnfs/comp-app/collection/operators/profile .
export operator_helm_path="${CSAR_DIR}/$1/operator.tar.gz"
popd
}
+
+function populate_CSAR_m3db_helm {
+ _checks_args "$1"
+ pushd "${CSAR_DIR}/$1"
+ print_msg "Create Helm Chart Archives for m3db"
+ rm -f *.tar.gz
+ #tar -czf m3db.tar.gz -C $test_folder/vnfs/comp-app/collection/app3-latest/helm .
+ #tar -czf m3db_profile.tar.gz -C $test_folder/vnfs/comp-app/collection/app3-latest/profile .
+ tar -czf m3db.tar.gz -C $test_folder/vnfs/comp-app/collection/m3db/helm .
+ tar -czf m3db_profile.tar.gz -C $test_folder/vnfs/comp-app/collection/m3db/profile .
+ export m3db_helm_path="${CSAR_DIR}/$1/m3db.tar.gz"
+ popd
+}
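The new populate_CSAR_m3db_helm helper mirrors populate_CSAR_operator_helm: it stages the m3db chart and profile archives under the CSAR directory and exports the chart path for the installer scripts below. A minimal usage sketch, assuming CSAR_DIR and test_folder are set the way the kud test scripts set them:

    # stage the m3db archives for a test run (csar_id is the id the installers use)
    source _common.sh
    CSAR_DIR="/opt/csar"
    csar_id="m3db-cb009bfe-bbee-11e8-9766-525400435678"
    populate_CSAR_m3db_helm "$csar_id"
    ls "$CSAR_DIR/$csar_id"      # expect m3db.tar.gz and m3db_profile.tar.gz
    echo "$m3db_helm_path"       # exported for cluster1/cluster2-m3db-installer.sh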
diff --git a/kud/tests/cluster1-m3db-installer.sh b/kud/tests/cluster1-m3db-installer.sh
new file mode 100755
index 00000000..848313f2
--- /dev/null
+++ b/kud/tests/cluster1-m3db-installer.sh
@@ -0,0 +1,382 @@
+# /*
+# * Copyright 2020 Intel Corporation, Inc
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+
+source _common_test.sh
+source _functions.sh
+source _common.sh
+
+
+base_url_clm=${base_url_clm:-"http://192.168.121.29:30073/v2"}
+base_url_ncm=${base_url_ncm:-"http://192.168.121.29:31955/v2"}
+base_url_orchestrator=${base_url_orchestrator:-"http://192.168.121.29:32447/v2"}
+base_url_rsync=${base_url_rsync:-"http://192.168.121.29:32002/v2"}
+
+CSAR_DIR="/opt/csar"
+csar_id="m3db-cb009bfe-bbee-11e8-9766-525400435678"
+
+app1_helm_path="$CSAR_DIR/$csar_id/m3db.tar.gz"
+app1_profile_path="$CSAR_DIR/$csar_id/m3db_profile.tar.gz"
+
+# ---------BEGIN: SET CLM DATA---------------
+
+clusterprovidername="collection-m3db-installer-cluster1-provider"
+clusterproviderdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$clusterprovidername",
+ "description": "description of $clusterprovidername",
+ "userData1": "$clusterprovidername user data 1",
+ "userData2": "$clusterprovidername user data 2"
+ }
+}
+EOF
+)"
+
+clustername="cluster1"
+clusterdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$clustername",
+ "description": "description of $clustername",
+ "userData1": "$clustername user data 1",
+ "userData2": "$clustername user data 2"
+ }
+}
+EOF
+)"
+
+kubeconfigcluster1="/opt/kud/multi-cluster/cluster1/artifacts/admin.conf"
+
+labelname="LabelCluster2"
+labeldata="$(cat<<EOF
+{"label-name": "$labelname"}
+EOF
+)"
+
+#--TODO--Creating provider network and network intents----
+
+# add the rsync controller entry
+rsynccontrollername="rsync"
+rsynccontrollerdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "rsync",
+ "description": "description of $rsynccontrollername controller",
+ "userData1": "user data 1 for $rsynccontrollername",
+ "userData2": "user data 2 for $rsynccontrollername"
+ },
+ "spec": {
+ "host": "${rsynccontrollername}",
+ "port": 9041
+ }
+}
+EOF
+)"
+
+# ------------END: SET CLM DATA--------------
+
+#-------------BEGIN:SET ORCH DATA------------------
+
+# define a project
+projectname="M3dbInstaller-cluster1Old-Project"
+projectdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$projectname",
+ "description": "description of $projectname controller",
+ "userData1": "$projectname user data 1",
+ "userData2": "$projectname user data 2"
+ }
+}
+EOF
+)"
+
+# define a composite application
+m3db_compositeapp_name="OperatorsCompositeApp"
+compositeapp_version="v1"
+compositeapp_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${m3db_compositeapp_name}",
+ "description": "description of ${m3db_compositeapp_name}",
+ "userData1": "user data 1 for ${m3db_compositeapp_name}",
+ "userData2": "user data 2 for ${m3db_compositeapp_name}"
+ },
+ "spec":{
+ "version":"${compositeapp_version}"
+ }
+}
+EOF
+)"
+
+
+# add m3db app into compositeApp
+
+m3db_app_name="m3db"
+m3db_helm_chart=${app1_helm_path}
+
+m3db_app_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${m3db_app_name}",
+ "description": "description for app ${m3db_app_name}",
+ "userData1": "user data 2 for ${m3db_app_name}",
+ "userData2": "user data 2 for ${m3db_app_name}"
+ }
+}
+EOF
+)"
+
+# Add the composite profile
+m3db_composite_profile_name="m3db_composite-profile"
+m3db_composite_profile_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${m3db_composite_profile_name}",
+ "description":"description of ${m3db_composite_profile_name}",
+ "userData1":"user data 1 for ${m3db_composite_profile_name}",
+ "userData2":"user data 2 for ${m3db_composite_profile_name}"
+ }
+}
+EOF
+)"
+
+
+# Add the m3db profile data into composite profile data
+m3db_profile_name="m3db-profile"
+m3db_profile_file=$app1_profile_path
+m3db_profile_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${m3db_profile_name}",
+ "description":"description of ${m3db_profile_name}",
+ "userData1":"user data 1 for ${m3db_profile_name}",
+ "userData2":"user data 2 for ${m3db_profile_name}"
+ },
+ "spec":{
+ "app-name": "${m3db_app_name}"
+ }
+}
+EOF
+)"
+
+
+# define the generic placement intent
+generic_placement_intent_name="M3db-generic-placement-intent"
+generic_placement_intent_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${generic_placement_intent_name}",
+ "description":"${generic_placement_intent_name}",
+ "userData1":"${generic_placement_intent_name}",
+ "userData2":"${generic_placement_intent_name}"
+ },
+ "spec":{
+ "logical-cloud":"unused_logical_cloud"
+ }
+}
+EOF
+)"
+
+
+# define placement intent for m3db as sub-app
+m3db_placement_intent_name="m3db-placement-intent"
+m3db_placement_intent_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${m3db_placement_intent_name}",
+ "description":"description of ${m3db_placement_intent_name}",
+ "userData1":"user data 1 for ${m3db_placement_intent_name}",
+ "userData2":"user data 2 for ${m3db_placement_intent_name}"
+ },
+ "spec":{
+ "app-name":"${m3db_app_name}",
+ "intent":{
+ "allOf":[
+ { "provider-name":"${clusterprovidername}",
+ "cluster-label-name":"${labelname}"
+ }
+ ]
+ }
+ }
+}
+EOF
+)"
+
+
+# define a deployment intent group
+release="m3db"
+deployment_intent_group_name="m3db_deployment_intent_group"
+deployment_intent_group_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${deployment_intent_group_name}",
+ "description":"descriptiont of ${deployment_intent_group_name}",
+ "userData1":"user data 1 for ${deployment_intent_group_name}",
+ "userData2":"user data 2 for ${deployment_intent_group_name}"
+ },
+ "spec":{
+ "profile":"${m3db_composite_profile_name}",
+ "version":"${release}",
+ "override-values":[]
+ }
+}
+EOF
+)"
+
+# define the intents to be used by the group
+deployment_intents_in_group_name="m3db_deploy_intents"
+deployment_intents_in_group_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${deployment_intents_in_group_name}",
+ "description":"descriptionf of ${deployment_intents_in_group_name}",
+ "userData1":"user data 1 for ${deployment_intents_in_group_name}",
+ "userData2":"user data 2 for ${deployment_intents_in_group_name}"
+ },
+ "spec":{
+ "intent":{
+ "genericPlacementIntent":"${generic_placement_intent_name}"
+ }
+ }
+}
+EOF
+)"
+
+#---------END: SET ORCH DATA--------------------
+
+function createOrchestratorData {
+ print_msg "creating controller entries"
+ call_api -d "${rsynccontrollerdata}" "${base_url_orchestrator}/controllers"
+
+
+ print_msg "creating project entry"
+ call_api -d "${projectdata}" "${base_url_orchestrator}/projects"
+
+
+ print_msg "creating m3db composite app entry"
+ call_api -d "${compositeapp_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps"
+
+ print_msg "adding m3db sub-app to the composite app"
+ call_api -F "metadata=${m3db_app_data}" \
+ -F "file=@${m3db_helm_chart}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/apps"
+
+
+ print_msg "creating m3db composite profile entry"
+ call_api -d "${m3db_composite_profile_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/composite-profiles"
+
+ print_msg "adding m3db sub-app profile to the composite profile"
+ call_api -F "metadata=${m3db_profile_data}" \
+ -F "file=@${m3db_profile_file}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/composite-profiles/${m3db_composite_profile_name}/profiles"
+
+
+
+ print_msg "create the generic placement intent"
+ call_api -d "${generic_placement_intent_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/generic-placement-intents"
+ print_msg "add the m3db app placement intent to the generic placement intent"
+ call_api -d "${m3db_placement_intent_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}/app-intents"
+
+ print_msg "create the deployment intent group"
+ call_api -d "${deployment_intent_group_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups"
+ call_api -d "${deployment_intents_in_group_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents"
+ print_msg "finished orch data creation"
+}
+
+function deleteOrchestratorData {
+
+ # TODO- delete rsync controller and any other controller
+ delete_resource "${base_url_orchestrator}/controllers/${rsynccontrollername}"
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}"
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${m3db_placement_intent_name}"
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/composite-profiles/${m3db_composite_profile_name}/profiles/${m3db_profile_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/composite-profiles/${m3db_composite_profile_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/apps/${m3db_app_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}"
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}"
+ print_msg "finished orch data deletion"
+
+}
+
+function createClmData {
+ print_msg "Creating cluster provider and cluster"
+ call_api -d "${clusterproviderdata}" "${base_url_clm}/cluster-providers"
+ call_api -H "Content-Type: multipart/form-data" -F "metadata=$clusterdata" -F "file=@$kubeconfigcluster1" "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters"
+ call_api -d "${labeldata}" "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels"
+
+}
+
+
+function deleteClmData {
+ delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels/${labelname}"
+ delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}"
+ delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}"
+}
+
+
+function createData {
+ createClmData
+ createOrchestratorData
+}
+
+function deleteData {
+ deleteClmData
+ deleteOrchestratorData
+}
+
+function instantiate {
+ call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"
+}
+
+function terminateOrchData {
+ call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/terminate"
+}
+
+
+# Setup
+install_deps
+populate_CSAR_m3db_helm "$csar_id"
+
+#terminateOrchData
+deleteData
+createData
+instantiate
+
+
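Every endpoint in the installer uses the ${var:-default} idiom, so the hard-coded 192.168.121.29 NodePort URLs are only fall-backs for one particular test bed. A sketch of pointing the script at a different deployment; the IP below is a placeholder, not a value from this patch:

    # override the service endpoints before running the installer
    export base_url_clm="http://10.10.10.5:30073/v2"
    export base_url_ncm="http://10.10.10.5:31955/v2"
    export base_url_orchestrator="http://10.10.10.5:32447/v2"
    ./cluster1-m3db-installer.sh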
diff --git a/kud/tests/m3db-operator-test.sh b/kud/tests/cluster1-m3db-operator-test.sh
index d5ea90db..5dd95c67 100755
--- a/kud/tests/m3db-operator-test.sh
+++ b/kud/tests/cluster1-m3db-operator-test.sh
@@ -29,10 +29,11 @@ source _common.sh
# base_url_clm=${base_url_clm:-"http://localhost:9019/v2"}
# base_url_ncm=${base_url_ncm:-"http://localhost:9016/v2"}
-base_url_orchestrator=${base_url_orchestrator:-"http://192.168.121.29:30356/v2"}
-base_url_clm=${base_url_clm:-"http://192.168.121.29:32551/v2"}
-base_url_ncm=${base_url_ncm:-"http://192.168.121.29:31406/v2"}
-base_url_rysnc=${base_url_rysnc:-"http://192.168.121.29:31751/v2"}
+base_url_clm=${base_url_clm:-"http://192.168.121.29:30073/v2"}
+base_url_ncm=${base_url_ncm:-"http://192.168.121.29:31955/v2"}
+base_url_orchestrator=${base_url_orchestrator:-"http://192.168.121.29:32447/v2"}
+base_url_rsync=${base_url_rsync:-"http://192.168.121.29:32002/v2"}
+
CSAR_DIR="/opt/csar"
csar_id="operators-cb009bfe-bbee-11e8-9766-525400435678"
@@ -43,11 +44,9 @@ app1_profile_path="$CSAR_DIR/$csar_id/operator_profile.tar.gz"
-
-
# ---------BEGIN: SET CLM DATA---------------
-clusterprovidername="collection-operator-cluster-provider"
+clusterprovidername="collection-cluster1-provider"
clusterproviderdata="$(cat<<EOF
{
"metadata": {
@@ -107,7 +106,7 @@ EOF
#-------------BEGIN:SET ORCH DATA------------------
# define a project
-projectname="OperatorsProject"
+projectname="OperatorsProjectCluster1"
projectdata="$(cat<<EOF
{
"metadata": {
@@ -370,10 +369,17 @@ function instantiate {
call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"
}
+
+function terminateOrchData {
+ call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/terminate"
+}
+
# Setup
install_deps
populate_CSAR_operator_helm "$csar_id"
+
+#terminateOrchData
deleteData
createData
instantiate
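terminateOrchData is added here but left commented out in the main flow, so a deployment instantiated by an earlier run may need to be terminated by hand before deleteData can clean up. A sketch of the manual call, assembled from the names this script defines:

    # tear down the previously instantiated deployment intent group
    call_api -d "{ }" \
        "${base_url_orchestrator}/projects/OperatorsProjectCluster1/composite-apps/OperatorsCompositeApp/v1/deployment-intent-groups/operators_deployment_intent_group/terminate"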
diff --git a/kud/tests/cluster2-m3db-installer.sh b/kud/tests/cluster2-m3db-installer.sh
new file mode 100755
index 00000000..b9b9b3ef
--- /dev/null
+++ b/kud/tests/cluster2-m3db-installer.sh
@@ -0,0 +1,387 @@
+# /*
+# * Copyright 2020 Intel Corporation, Inc
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+
+source _common_test.sh
+source _functions.sh
+source _common.sh
+
+
+base_url_clm=${base_url_clm:-"http://192.168.121.29:30073/v2"}
+base_url_ncm=${base_url_ncm:-"http://192.168.121.29:31955/v2"}
+base_url_orchestrator=${base_url_orchestrator:-"http://192.168.121.29:32447/v2"}
+base_url_rsync=${base_url_rsync:-"http://192.168.121.29:32002/v2"}
+
+CSAR_DIR="/opt/csar"
+csar_id="m3db-cb009bfe-bbee-11e8-9766-525400435678"
+
+app1_helm_path="$CSAR_DIR/$csar_id/m3db.tar.gz"
+app1_profile_path="$CSAR_DIR/$csar_id/m3db_profile.tar.gz"
+
+# ---------BEGIN: SET CLM DATA---------------
+
+clusterprovidername="collection-m3db-installer-cluster2-provider"
+clusterproviderdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$clusterprovidername",
+ "description": "description of $clusterprovidername",
+ "userData1": "$clusterprovidername user data 1",
+ "userData2": "$clusterprovidername user data 2"
+ }
+}
+EOF
+)"
+
+clustername="cluster2"
+clusterdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$clustername",
+ "description": "description of $clustername",
+ "userData1": "$clustername user data 1",
+ "userData2": "$clustername user data 2"
+ }
+}
+EOF
+)"
+
+kubeconfigcluster2="/opt/kud/multi-cluster/cluster2/artifacts/admin.conf"
+
+labelname="LabelCluster2"
+labeldata="$(cat<<EOF
+{"label-name": "$labelname"}
+EOF
+)"
+
+#--TODO--Creating provider network and network intents----
+
+# add the rsync controller entry
+rsynccontrollername="rsync"
+rsynccontrollerdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "rsync",
+ "description": "description of $rsynccontrollername controller",
+ "userData1": "user data 1 for $rsynccontrollername",
+ "userData2": "user data 2 for $rsynccontrollername"
+ },
+ "spec": {
+ "host": "${rsynccontrollername}",
+ "port": 9041
+ }
+}
+EOF
+)"
+
+# ------------END: SET CLM DATA--------------
+
+#-------------BEGIN:SET ORCH DATA------------------
+
+# define a project
+projectname="M3dbInstaller-cluster1-Project"
+projectdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$projectname",
+ "description": "description of $projectname controller",
+ "userData1": "$projectname user data 1",
+ "userData2": "$projectname user data 2"
+ }
+}
+EOF
+)"
+
+# define a composite application
+m3db_compositeapp_name="OperatorsCompositeApp"
+compositeapp_version="v1"
+compositeapp_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${m3db_compositeapp_name}",
+ "description": "description of ${m3db_compositeapp_name}",
+ "userData1": "user data 1 for ${m3db_compositeapp_name}",
+ "userData2": "user data 2 for ${m3db_compositeapp_name}"
+ },
+ "spec":{
+ "version":"${compositeapp_version}"
+ }
+}
+EOF
+)"
+
+
+# add m3db app into compositeApp
+
+m3db_app_name="m3db"
+m3db_helm_chart=${app1_helm_path}
+
+m3db_app_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${m3db_app_name}",
+ "description": "description for app ${m3db_app_name}",
+ "userData1": "user data 2 for ${m3db_app_name}",
+ "userData2": "user data 2 for ${m3db_app_name}"
+ }
+}
+EOF
+)"
+
+# Add the composite profile
+m3db_composite_profile_name="m3db_composite-profile"
+m3db_composite_profile_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${m3db_composite_profile_name}",
+ "description":"description of ${m3db_composite_profile_name}",
+ "userData1":"user data 1 for ${m3db_composite_profile_name}",
+ "userData2":"user data 2 for ${m3db_composite_profile_name}"
+ }
+}
+EOF
+)"
+
+
+# Add the m3db profile data into composite profile data
+m3db_profile_name="m3db-profile"
+m3db_profile_file=$app1_profile_path
+m3db_profile_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${m3db_profile_name}",
+ "description":"description of ${m3db_profile_name}",
+ "userData1":"user data 1 for ${m3db_profile_name}",
+ "userData2":"user data 2 for ${m3db_profile_name}"
+ },
+ "spec":{
+ "app-name": "${m3db_app_name}"
+ }
+}
+EOF
+)"
+
+
+# define the generic placement intent
+generic_placement_intent_name="M3db-generic-placement-intent"
+generic_placement_intent_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${generic_placement_intent_name}",
+ "description":"${generic_placement_intent_name}",
+ "userData1":"${generic_placement_intent_name}",
+ "userData2":"${generic_placement_intent_name}"
+ },
+ "spec":{
+ "logical-cloud":"unused_logical_cloud"
+ }
+}
+EOF
+)"
+
+
+# define placement intent for m3db as sub-app
+m3db_placement_intent_name="m3db-placement-intent"
+m3db_placement_intent_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${m3db_placement_intent_name}",
+ "description":"description of ${m3db_placement_intent_name}",
+ "userData1":"user data 1 for ${m3db_placement_intent_name}",
+ "userData2":"user data 2 for ${m3db_placement_intent_name}"
+ },
+ "spec":{
+ "app-name":"${m3db_app_name}",
+ "intent":{
+ "allOf":[
+ { "provider-name":"${clusterprovidername}",
+ "cluster-label-name":"${labelname}"
+ }
+ ]
+ }
+ }
+}
+EOF
+)"
+
+
+# define a deployment intent group
+release="m3db"
+deployment_intent_group_name="m3db_deployment_intent_group"
+deployment_intent_group_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${deployment_intent_group_name}",
+ "description":"descriptiont of ${deployment_intent_group_name}",
+ "userData1":"user data 1 for ${deployment_intent_group_name}",
+ "userData2":"user data 2 for ${deployment_intent_group_name}"
+ },
+ "spec":{
+ "profile":"${m3db_composite_profile_name}",
+ "version":"${release}",
+ "override-values":[]
+ }
+}
+EOF
+)"
+
+# define the intents to be used by the group
+deployment_intents_in_group_name="m3db_deploy_intents"
+deployment_intents_in_group_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${deployment_intents_in_group_name}",
+ "description":"descriptionf of ${deployment_intents_in_group_name}",
+ "userData1":"user data 1 for ${deployment_intents_in_group_name}",
+ "userData2":"user data 2 for ${deployment_intents_in_group_name}"
+ },
+ "spec":{
+ "intent":{
+ "genericPlacementIntent":"${generic_placement_intent_name}"
+ }
+ }
+}
+EOF
+)"
+
+#---------END: SET ORCH DATA--------------------
+
+function createOrchestratorData {
+ print_msg "creating controller entries"
+ call_api -d "${rsynccontrollerdata}" "${base_url_orchestrator}/controllers"
+
+
+ print_msg "creating project entry"
+ call_api -d "${projectdata}" "${base_url_orchestrator}/projects"
+
+
+ print_msg "creating m3db composite app entry"
+ call_api -d "${compositeapp_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps"
+
+ print_msg "adding m3db sub-app to the composite app"
+ call_api -F "metadata=${m3db_app_data}" \
+ -F "file=@${m3db_helm_chart}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/apps"
+
+
+ print_msg "creating m3db composite profile entry"
+ call_api -d "${m3db_composite_profile_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/composite-profiles"
+
+ print_msg "adding m3db sub-app profile to the composite profile"
+ call_api -F "metadata=${m3db_profile_data}" \
+ -F "file=@${m3db_profile_file}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/composite-profiles/${m3db_composite_profile_name}/profiles"
+
+
+
+ print_msg "create the generic placement intent"
+ call_api -d "${generic_placement_intent_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/generic-placement-intents"
+ print_msg "add the m3db app placement intent to the generic placement intent"
+ call_api -d "${m3db_placement_intent_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}/app-intents"
+
+ print_msg "create the deployment intent group"
+ call_api -d "${deployment_intent_group_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups"
+ call_api -d "${deployment_intents_in_group_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents"
+ print_msg "finished orch data creation"
+}
+
+function deleteOrchestratorData {
+
+ # TODO- delete rsync controller and any other controller
+ delete_resource "${base_url_orchestrator}/controllers/${rsynccontrollername}"
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}"
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${m3db_placement_intent_name}"
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/composite-profiles/${m3db_composite_profile_name}/profiles/${m3db_profile_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/composite-profiles/${m3db_composite_profile_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/apps/${m3db_app_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}"
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}"
+ print_msg "finished orch data deletion"
+
+}
+
+function createClmData {
+ print_msg "Creating cluster provider and cluster"
+ call_api -d "${clusterproviderdata}" "${base_url_clm}/cluster-providers"
+ call_api -H "Content-Type: multipart/form-data" -F "metadata=$clusterdata" -F "file=@$kubeconfigcluster2" "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters"
+ call_api -d "${labeldata}" "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels"
+
+}
+
+
+function deleteClmData {
+ delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels/${labelname}"
+ delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}"
+ delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}"
+}
+
+
+function createData {
+ createClmData
+ createOrchestratorData
+}
+
+function deleteData {
+ deleteClmData
+ deleteOrchestratorData
+}
+
+function instantiate {
+ call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"
+}
+
+function terminateOrchData {
+ call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${m3db_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/terminate"
+}
+
+
+# Setup
+install_deps
+populate_CSAR_m3db_helm "$csar_id"
+
+#terminateOrchData
+deleteData
+createData
+instantiate
+
+
+
+
+
+
+
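Placement in both installers is label-driven: the allOf clause in the app intent selects every cluster registered under the provider that carries the label, not one named cluster. Registering an additional cluster under the same provider and label would therefore pull m3db onto it as well; a sketch using this script's names (the extra cluster name is hypothetical):

    # label one more registered cluster so the placement intent selects it too
    call_api -d '{"label-name": "LabelCluster2"}' \
        "${base_url_clm}/cluster-providers/collection-m3db-installer-cluster2-provider/clusters/<extra-cluster>/labels"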
diff --git a/kud/tests/cluster2-m3db-operator-test.sh b/kud/tests/cluster2-m3db-operator-test.sh
new file mode 100755
index 00000000..54ca4340
--- /dev/null
+++ b/kud/tests/cluster2-m3db-operator-test.sh
@@ -0,0 +1,386 @@
+# /*
+# * Copyright 2020 Intel Corporation, Inc
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+
+source _common_test.sh
+source _functions.sh
+source _common.sh
+
+# base_url_orchestrator=${base_url_orchestrator:-"http://localhost:9015/v2"}
+# base_url_clm=${base_url_clm:-"http://localhost:9019/v2"}
+# base_url_ncm=${base_url_ncm:-"http://localhost:9016/v2"}
+
+base_url_clm=${base_url_clm:-"http://192.168.121.29:30073/v2"}
+base_url_ncm=${base_url_ncm:-"http://192.168.121.29:31955/v2"}
+base_url_orchestrator=${base_url_orchestrator:-"http://192.168.121.29:32447/v2"}
+base_url_rsync=${base_url_rsync:-"http://192.168.121.29:32002/v2"}
+
+
+CSAR_DIR="/opt/csar"
+csar_id="operators-cb009bfe-bbee-11e8-9766-525400435678"
+
+
+app1_helm_path="$CSAR_DIR/$csar_id/operator.tar.gz"
+app1_profile_path="$CSAR_DIR/$csar_id/operator_profile.tar.gz"
+
+
+
+# ---------BEGIN: SET CLM DATA---------------
+
+clusterprovidername="collection-cluster2-provider"
+clusterproviderdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$clusterprovidername",
+ "description": "description of $clusterprovidername",
+ "userData1": "$clusterprovidername user data 1",
+ "userData2": "$clusterprovidername user data 2"
+ }
+}
+EOF
+)"
+
+clustername="cluster2"
+clusterdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$clustername",
+ "description": "description of $clustername",
+ "userData1": "$clustername user data 1",
+ "userData2": "$clustername user data 2"
+ }
+}
+EOF
+)"
+
+kubeconfigcluster2="/opt/kud/multi-cluster/cluster2/artifacts/admin.conf"
+
+labelname="LabelCluster2"
+labeldata="$(cat<<EOF
+{"label-name": "$labelname"}
+EOF
+)"
+
+#--TODO--Creating provider network and network intents----
+
+# add the rsync controller entry
+rsynccontrollername="rsync"
+rsynccontrollerdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "rsync",
+ "description": "description of $rsynccontrollername controller",
+ "userData1": "user data 1 for $rsynccontrollername",
+ "userData2": "user data 2 for $rsynccontrollername"
+ },
+ "spec": {
+ "host": "${rsynccontrollername}",
+ "port": 9041
+ }
+}
+EOF
+)"
+
+# ------------END: SET CLM DATA--------------
+
+
+#-------------BEGIN:SET ORCH DATA------------------
+
+# define a project
+projectname="OperatorsProjectCluster2"
+projectdata="$(cat<<EOF
+{
+ "metadata": {
+ "name": "$projectname",
+ "description": "description of $projectname controller",
+ "userData1": "$projectname user data 1",
+ "userData2": "$projectname user data 2"
+ }
+}
+EOF
+)"
+
+# define a composite application
+operators_compositeapp_name="OperatorsCompositeApp"
+compositeapp_version="v1"
+compositeapp_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${operators_compositeapp_name}",
+ "description": "description of ${operators_compositeapp_name}",
+ "userData1": "user data 1 for ${operators_compositeapp_name}",
+ "userData2": "user data 2 for ${operators_compositeapp_name}"
+ },
+ "spec":{
+ "version":"${compositeapp_version}"
+ }
+}
+EOF
+)"
+
+# add operator into operators compositeApp
+
+
+operator_app_name="operator"
+operator_helm_chart=${app1_helm_path}
+
+operator_app_data="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${operator_app_name}",
+ "description": "description for app ${operator_app_name}",
+ "userData1": "user data 2 for ${operator_app_name}",
+ "userData2": "user data 2 for ${operator_app_name}"
+ }
+}
+EOF
+)"
+
+
+# Add the composite profile
+operators_composite_profile_name="operators_composite-profile"
+operators_composite_profile_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${operators_composite_profile_name}",
+ "description":"description of ${operators_composite_profile_name}",
+ "userData1":"user data 1 for ${operators_composite_profile_name}",
+ "userData2":"user data 2 for ${operators_composite_profile_name}"
+ }
+}
+EOF
+)"
+
+# Add the operator profile data into operators composite profile data
+operator_profile_name="operator-profile"
+operator_profile_file=$app1_profile_path
+operator_profile_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${operator_profile_name}",
+ "description":"description of ${operator_profile_name}",
+ "userData1":"user data 1 for ${operator_profile_name}",
+ "userData2":"user data 2 for ${operator_profile_name}"
+ },
+ "spec":{
+ "app-name": "${operator_app_name}"
+ }
+}
+EOF
+)"
+
+
+
+# define the generic placement intent
+generic_placement_intent_name="Operators-generic-placement-intent"
+generic_placement_intent_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${generic_placement_intent_name}",
+ "description":"${generic_placement_intent_name}",
+ "userData1":"${generic_placement_intent_name}",
+ "userData2":"${generic_placement_intent_name}"
+ },
+ "spec":{
+ "logical-cloud":"unused_logical_cloud"
+ }
+}
+EOF
+)"
+
+# define placement intent for operator sub-app
+operator_placement_intent_name="operator-placement-intent"
+operator_placement_intent_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${operator_placement_intent_name}",
+ "description":"description of ${operator_placement_intent_name}",
+ "userData1":"user data 1 for ${operator_placement_intent_name}",
+ "userData2":"user data 2 for ${operator_placement_intent_name}"
+ },
+ "spec":{
+ "app-name":"${operator_app_name}",
+ "intent":{
+ "allOf":[
+ { "provider-name":"${clusterprovidername}",
+ "cluster-label-name":"${labelname}"
+ }
+ ]
+ }
+ }
+}
+EOF
+)"
+
+
+# define a deployment intent group
+release="operators"
+deployment_intent_group_name="operators_deployment_intent_group"
+deployment_intent_group_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${deployment_intent_group_name}",
+ "description":"descriptiont of ${deployment_intent_group_name}",
+ "userData1":"user data 1 for ${deployment_intent_group_name}",
+ "userData2":"user data 2 for ${deployment_intent_group_name}"
+ },
+ "spec":{
+ "profile":"${operators_composite_profile_name}",
+ "version":"${release}",
+ "override-values":[]
+ }
+}
+EOF
+)"
+
+# define the intents to be used by the group
+deployment_intents_in_group_name="operators_deploy_intents"
+deployment_intents_in_group_data="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${deployment_intents_in_group_name}",
+ "description":"descriptionf of ${deployment_intents_in_group_name}",
+ "userData1":"user data 1 for ${deployment_intents_in_group_name}",
+ "userData2":"user data 2 for ${deployment_intents_in_group_name}"
+ },
+ "spec":{
+ "intent":{
+ "genericPlacementIntent":"${generic_placement_intent_name}"
+ }
+ }
+}
+EOF
+)"
+
+
+#---------END: SET ORCH DATA--------------------
+
+
+function createOrchestratorData {
+
+ print_msg "creating controller entries"
+ call_api -d "${rsynccontrollerdata}" "${base_url_orchestrator}/controllers"
+
+ print_msg "creating project entry"
+ call_api -d "${projectdata}" "${base_url_orchestrator}/projects"
+
+ print_msg "creating operators composite app entry"
+ call_api -d "${compositeapp_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps"
+
+ print_msg "adding operator sub-app to the composite app"
+ call_api -F "metadata=${operator_app_data}" \
+ -F "file=@${operator_helm_chart}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/apps"
+
+
+ print_msg "creating operators composite profile entry"
+ call_api -d "${operators_composite_profile_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/composite-profiles"
+
+ print_msg "adding operator sub-app profile to the composite profile"
+ call_api -F "metadata=${operator_profile_data}" \
+ -F "file=@${operator_profile_file}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/composite-profiles/${operators_composite_profile_name}/profiles"
+
+
+ print_msg "create the generic placement intent"
+ call_api -d "${generic_placement_intent_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/generic-placement-intents"
+ print_msg "add the operator app placement intent to the generic placement intent"
+ call_api -d "${operator_placement_intent_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}/app-intents"
+
+
+ print_msg "create the deployment intent group"
+ call_api -d "${deployment_intent_group_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/deployment-intent-groups"
+ call_api -d "${deployment_intents_in_group_data}" \
+ "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents"
+
+}
+
+function deleteOrchestratorData {
+ # TODO- delete rsync controller and any other controller
+ delete_resource "${base_url_orchestrator}/controllers/${rsynccontrollername}"
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}"
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${operator_placement_intent_name}"
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/composite-profiles/${operators_composite_profile_name}/profiles/${operator_profile_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/composite-profiles/${operators_composite_profile_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/apps/${operator_app_name}"
+
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}"
+
+ delete_resource "${base_url_orchestrator}/projects/${projectname}"
+}
+
+function createClmData {
+ print_msg "Creating cluster provider and cluster"
+ call_api -d "${clusterproviderdata}" "${base_url_clm}/cluster-providers"
+ call_api -H "Content-Type: multipart/form-data" -F "metadata=$clusterdata" -F "file=@$kubeconfigcluster2" "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters"
+ call_api -d "${labeldata}" "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels"
+
+
+}
+
+function deleteClmData {
+ delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels/${labelname}"
+ delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}"
+ delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}"
+}
+function createData {
+ createClmData
+ createOrchestratorData
+}
+function deleteData {
+ deleteClmData
+ deleteOrchestratorData
+}
+function instantiate {
+ # call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/approve"
+ call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"
+}
+
+
+function terminateOrchData {
+ call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/terminate"
+}
+
+# Setup
+install_deps
+populate_CSAR_operator_helm "$csar_id"
+
+
+#terminateOrchData
+deleteData
+createData
+instantiate
+
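The commented-out approve call inside instantiate (still carrying vfw_* variable names from the script it was copied from) points at the intended lifecycle: on orchestrator builds that require it, the deployment intent group is approved before being instantiated. The call has the same shape as instantiate; a sketch with this script's names:

    # optional approval step before instantiate, if the orchestrator enforces it
    call_api -d "{ }" \
        "${base_url_orchestrator}/projects/${projectname}/composite-apps/${operators_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/approve"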
diff --git a/kud/tests/prometheus-test.sh b/kud/tests/prometheus-test.sh
index ca995ba0..b746b88e 100755
--- a/kud/tests/prometheus-test.sh
+++ b/kud/tests/prometheus-test.sh
@@ -25,10 +25,12 @@ source _functions.sh
source _common.sh
-base_url_clm=${base_url_clm:-"http://192.168.121.29:32551/v2"}
-base_url_ncm=${base_url_ncm:-"http://192.168.121.29:31406/v2"}
-base_url_orchestrator=${base_url_orchestrator:-"http://192.168.121.29:30356/v2"}
-base_url_rysnc=${base_url_orchestrator:-"http://192.168.121.29:31751/v2"}
+base_url_clm=${base_url_clm:-"http://192.168.121.29:30073/v2"}
+base_url_ncm=${base_url_ncm:-"http://192.168.121.29:31955/v2"}
+base_url_orchestrator=${base_url_orchestrator:-"http://192.168.121.29:32447/v2"}
+base_url_rsync=${base_url_rsync:-"http://192.168.121.29:32002/v2"}
+
+
CSAR_DIR="/opt/csar"
csar_id="cb009bfe-bbee-11e8-9766-525400435678"
@@ -37,8 +39,6 @@ app1_helm_path="$CSAR_DIR/$csar_id/prometheus-operator.tar.gz"
app1_profile_path="$CSAR_DIR/$csar_id/prometheus-operator_profile.tar.gz"
app2_helm_path="$CSAR_DIR/$csar_id/collectd.tar.gz"
app2_profile_path="$CSAR_DIR/$csar_id/collectd_profile.tar.gz"
-app3_helm_path="$CSAR_DIR/$csar_id/m3db.tar.gz"
-app3_profile_path="$CSAR_DIR/$csar_id/m3db_profile.tar.gz"
# ---------BEGIN: SET CLM DATA---------------
@@ -212,23 +212,6 @@ collectd_app_data="$(cat <<EOF
EOF
)"
-# add app entries for the m3db app into
-# compositeApp
-
-m3db_app_name="m3db"
-m3db_helm_chart=${app3_helm_path}
-
-m3db_app_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${m3db_app_name}",
- "description": "description for app ${m3db_app_name}",
- "userData1": "user data 2 for ${m3db_app_name}",
- "userData2": "user data 2 for ${m3db_app_name}"
- }
-}
-EOF
-)"
# Add the composite profile
collection_composite_profile_name="collection_composite-profile"
@@ -280,23 +263,6 @@ collectd_profile_data="$(cat <<EOF
EOF
)"
-# Add the m3db profile data into collection profile data
-m3db_profile_name="m3db-profile"
-m3db_profile_file=$app3_profile_path
-m3db_profile_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${m3db_profile_name}",
- "description":"description of ${m3db_profile_name}",
- "userData1":"user data 1 for ${m3db_profile_name}",
- "userData2":"user data 2 for ${m3db_profile_name}"
- },
- "spec":{
- "app-name": "${m3db_app_name}"
- }
-}
-EOF
-)"
# define the generic placement intent
generic_placement_intent_name="generic-placement-intent"
@@ -363,29 +329,6 @@ collectd_placement_intent_data="$(cat <<EOF
EOF
)"
-# define app placement intent for m3db
-m3db_placement_intent_name="m3db-placement-intent"
-m3db_placement_intent_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${m3db_placement_intent_name}",
- "description":"description of ${m3db_placement_intent_name}",
- "userData1":"user data 1 for ${m3db_placement_intent_name}",
- "userData2":"user data 2 for ${m3db_placement_intent_name}"
- },
- "spec":{
- "app-name":"${m3db_app_name}",
- "intent":{
- "allOf":[
- { "provider-name":"${clusterprovidername}",
- "cluster-label-name":"${labelname3}"
- }
- ]
- }
- }
-}
-EOF
-)"
# define a deployment intent group
release="collection"
@@ -450,12 +393,6 @@ function createOrchestratorData {
-F "file=@${collectd_helm_chart}" \
"${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps"
-
- print_msg "adding m3db app to the composite app"
- call_api -F "metadata=${m3db_app_data}" \
- -F "file=@${m3db_helm_chart}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps"
-
print_msg "creating collection composite profile entry"
call_api -d "${collection_composite_profile_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles"
@@ -469,11 +406,6 @@ function createOrchestratorData {
-F "file=@${collectd_profile_file}" \
"${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles"
- print_msg "adding m3db app profiles to the composite profile"
- call_api -F "metadata=${m3db_profile_data}" \
- -F "file=@${m3db_profile_file}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles"
-
print_msg "create the generic placement intent"
call_api -d "${generic_placement_intent_data}" \
"${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/generic-placement-intents"
@@ -486,10 +418,6 @@ function createOrchestratorData {
call_api -d "${collectd_placement_intent_data}" \
"${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}/app-intents"
- print_msg "add the collectd app placement intent to the generic placement intent"
- call_api -d "${m3db_placement_intent_data}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}/app-intents"
-
print_msg "create the deployment intent group"
call_api -d "${deployment_intent_group_data}" \
@@ -508,14 +436,12 @@ function deleteOrchestratorData {
delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${prometheus_placement_intent_name}"
delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${collectd_placement_intent_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${m3db_placement_intent_name}"
delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/generic-placement-intents/${generic_placement_intent_name}"
delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${prometheus_profile_name}"
delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${collectd_profile_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${m3db_profile_name}"
delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}"
@@ -523,7 +449,6 @@ function deleteOrchestratorData {
delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${collectd_app_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${m3db_app_name}"
delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}"
delete_resource "${base_url_orchestrator}/projects/${projectname}"
@@ -584,10 +509,16 @@ function instantiate {
call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"
}
+
+function terminateOrchData {
+ call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/terminate"
+}
+
# Setup
install_deps
populate_CSAR_composite_app_helm "$csar_id"
+#terminateOrchData
deleteData
createData
instantiate
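
The new terminateOrchData helper mirrors instantiate but posts to the terminate endpoint of the deployment intent group. A minimal sketch of the equivalent raw call — every value below is a placeholder (an assumption), not a name hard-coded in the test script:

```bash
# Standalone equivalent of terminateOrchData; substitute the orchestrator
# URL and resource names your test environment actually uses.
base_url_orchestrator="http://localhost:9015/v2"          # assumed endpoint
projectname="my-project"                                  # hypothetical names
compositeapp="collection-composite-app"
compositeapp_version="v1"
dig="collection-deployment-intent-group"

curl -X POST -H "Content-Type: application/json" -d '{ }' \
  "${base_url_orchestrator}/projects/${projectname}/composite-apps/${compositeapp}/${compositeapp_version}/deployment-intent-groups/${dig}/terminate"
```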
diff --git a/kud/tests/sanity-check-for-v2.sh b/kud/tests/sanity-check-for-v2.sh
index d350f712..994b86b1 100755
--- a/kud/tests/sanity-check-for-v2.sh
+++ b/kud/tests/sanity-check-for-v2.sh
@@ -494,8 +494,9 @@ fi
case "$1" in
"start" )
+ setup
deleteData
- print_msg "deleting the data success"
+ print_msg "Before creating, deleting the data success"
createData
print_msg "creating the data success"
instantiate
@@ -504,6 +505,8 @@ case "$1" in
"stop" )
terminateOrchData
print_msg "terminated the resources"
+ deleteData
+ print_msg "deleting the data success"
;;
*) usage ;;
esac
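
With this hunk, "start" runs setup before wiping and recreating the data, while "stop" now also deletes the orchestrator data after terminating the deployed resources. Expected invocation, assuming the script is run from kud/tests:

```bash
# Bring the composite app up, then tear everything down again.
./sanity-check-for-v2.sh start
./sanity-check-for-v2.sh stop
```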
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml
index c66cb131..f997309c 100644
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml
+++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/add_m3db_remote.yaml
@@ -1,6 +1,6 @@
spec:
remoteWrite:
- - url: "http://m3coordinator-m3db-cluster.training.svc.cluster.local:7201/api/v1/prom/remote/write"
+ - url: "http://192.168.121.15:32701/api/v1/prom/remote/write"
writeRelabelConfigs:
- targetLabel: metrics_storage
- replacement: m3db_remote
\ No newline at end of file
+ replacement: m3db_remote
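
The remote-write target moves from the in-cluster service DNS name to a node IP plus NodePort, so a Prometheus running outside the m3db cluster can reach the coordinator. A quick reachability sketch; the /health path is the usual m3coordinator health endpoint, but treat it as an assumption here:

```bash
# 192.168.121.15:32701 comes straight from the patch; substitute your own
# node IP / NodePort if they differ.
curl -sf http://192.168.121.15:32701/health && echo "m3coordinator reachable"
```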
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml
index d4ff7b30..a2642660 100755
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml
+++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/charts/grafana/values.yaml
@@ -61,7 +61,7 @@ image:
# - myRegistrKeySecretName
testFramework:
- enabled: true
+ enabled: false
image: "bats/bats"
tag: "v1.1.0"
imagePullPolicy: IfNotPresent
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/diff.txt b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/diff.txt
deleted file mode 100644
index a0d0d3ed..00000000
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/diff.txt
+++ /dev/null
@@ -1,145 +0,0 @@
-8a9,12
-> ## Override the deployment namespace
-> ##
-> namespaceOverride: ""
->
-33a38
-> kubeApiserverSlos: true
-96c101
-< enabled: false
----
-> enabled: true
-107a113
-> annotations: {}
-168c174
-< # {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
----
-> # {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
-169a176,177
-> # {{ end }}
-> # {{ end }}
-317c325
-< ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
----
-> ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
-443a452
-> runAsGroup: 2000
-473c482,483
-< enabled: false
----
-> enabled: true
-> namespaceOverride: ""
-539c549
-< ## Configure additional grafana datasources
----
-> ## Configure additional grafana datasources (passed through tpl)
-552c562
-< # url: https://prometheus.svc:9090
----
-> # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
-641a652,660
-> ## Enable scraping /metrics/probes from kubelet's service
-> ##
-> probes: true
->
-> ## Enable scraping /metrics/resource from kubelet's service
-> ##
-> resource: true
-> # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource
-> resourcePath: "/metrics/resource/v1alpha1"
-655a675,688
-> ## Metric relabellings to apply to samples before ingestion
-> ##
-> probesMetricRelabelings: []
-> # - sourceLabels: [__name__, image]
-> # separator: ;
-> # regex: container_([a-z_]+);
-> # replacement: $1
-> # action: drop
-> # - sourceLabels: [__name__]
-> # separator: ;
-> # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
-> # replacement: $1
-> # action: drop
->
-668a702,721
-> probesRelabelings:
-> - sourceLabels: [__metrics_path__]
-> targetLabel: metrics_path
-> # - sourceLabels: [__meta_kubernetes_pod_node_name]
-> # separator: ;
-> # regex: ^(.*)$
-> # targetLabel: nodename
-> # replacement: $1
-> # action: replace
->
-> resourceRelabelings:
-> - sourceLabels: [__metrics_path__]
-> targetLabel: metrics_path
-> # - sourceLabels: [__meta_kubernetes_pod_node_name]
-> # separator: ;
-> # regex: ^(.*)$
-> # targetLabel: nodename
-> # replacement: $1
-> # action: replace
->
-986c1039
-< enabled: false
----
-> enabled: true
-1011a1065
-> namespaceOverride: ""
-1020c1074
-< enabled: false
----
-> enabled: true
-1056a1111
-> namespaceOverride: ""
-1070a1126
-> # Only for prometheusOperator.image.tag < v0.39.0
-1094c1150
-< tag: v1.2.0
----
-> tag: v1.2.1
-1165c1221
-< cleanupCustomResource: true
----
-> cleanupCustomResource: false
-1255a1312,1313
-> fsGroup: 65534
-> runAsGroup: 65534
-1340c1398
-< type: NodePort
----
-> type: ClusterIP
-1510c1568
-< tag: v2.16.0
----
-> tag: v2.18.1
-1686c1744
-< ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
----
-> ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
-1750a1809,1813
-> # Additional volumes on the output StatefulSet definition.
-> volumes: []
-> # Additional VolumeMounts on the output StatefulSet definition.
-> volumeMounts: []
->
-1828a1892
-> runAsGroup: 2000
-1868,1879c1932
-< additionalServiceMonitors:
-< - name: service-monitor-cadvisor
-< additionalLabels:
-< collector: cadvisor
-< jobLabel: cadvisor
-< selector:
-< matchLabels:
-< app: cadvisor
-< endpoints:
-< - port: cadvisor-prometheus
-< interval: 10s
-< path: /metrics
----
-> additionalServiceMonitors: []
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml
index f296ca50..40de0165 100755
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml
+++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus-operator/values.yaml
@@ -474,7 +474,7 @@ alertmanager:
## Using default values from https://github.com/helm/charts/blob/master/stable/grafana/values.yaml
##
grafana:
- enabled: false
+ enabled: true
## Deploy default dashboards.
##
@@ -589,7 +589,7 @@ grafana:
## Component scraping the kube api server
##
kubeApiServer:
- enabled: false
+ enabled: true
tlsConfig:
serverName: kubernetes
insecureSkipVerify: false
@@ -626,7 +626,7 @@ kubeApiServer:
## Component scraping the kubelet and kubelet-hosted cAdvisor
##
kubelet:
- enabled: false
+ enabled: true
namespace: kube-system
serviceMonitor:
@@ -698,7 +698,7 @@ kubelet:
## Component scraping the kube controller manager
##
kubeControllerManager:
- enabled: false
+ enabled: true
## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
##
@@ -751,7 +751,7 @@ kubeControllerManager:
## Component scraping coreDns. Use either this or kubeDns
##
coreDns:
- enabled: false
+ enabled: true
service:
port: 9153
targetPort: 9153
@@ -831,7 +831,7 @@ kubeDns:
## Component scraping etcd
##
kubeEtcd:
- enabled: false
+ enabled: true
## If your etcd is not deployed as a pod, specify IPs it can be found on
##
@@ -891,7 +891,7 @@ kubeEtcd:
## Component scraping kube scheduler
##
kubeScheduler:
- enabled: false
+ enabled: true
## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
##
@@ -944,7 +944,7 @@ kubeScheduler:
## Component scraping kube proxy
##
kubeProxy:
- enabled: false
+ enabled: true
## If your kube proxy is not deployed as a pod, specify IPs it can be found on
##
@@ -1731,7 +1731,7 @@ prometheus:
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
remoteWrite:
# - url: http://remote1/push
- - url: "http://m3coordinator-m3db-cluster.training.svc.cluster.local:7201/api/v1/prom/remote/write"
+ - url: "http://192.168.121.15:32701/api/v1/prom/remote/write"
writeRelabelConfigs:
- targetLabel: metrics_storage
replacement: m3db_remote
@@ -1769,13 +1769,16 @@ prometheus:
## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
##
- additionalScrapeConfigs:
- - job_name: 'm3db'
- static_configs:
- - targets: ['m3db-cluster-rep0-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004','m3db-cluster-rep1-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004', 'm3db-cluster-rep2-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004']
- - job_name: 'm3coordinator'
+ additionalScrapeConfigs:
+ - job_name: 'm3'
static_configs:
- - targets: ['m3db-cluster-rep0-0.m3dbnode-m3db-cluster.training.svc.cluster.local:7203']
+ - targets: ['192.168.121.15:32701']
+ #- job_name: 'm3db'
+ #static_configs:
+ #- targets: ['m3db-cluster-rep0-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004','m3db-cluster-rep1-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004', 'm3db-cluster-rep2-0.m3dbnode-m3db-cluster.training.svc.cluster.local:9004']
+ #- job_name: 'm3coordinator'
+ #static_configs:
+ #- targets: ['m3db-cluster-rep0-0.m3dbnode-m3db-cluster.training.svc.cluster.local:7203']
# - job_name: kube-etcd
# kubernetes_sd_configs:
# - role: node
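
The additionalScrapeConfigs block now scrapes a single 'm3' job at the NodePort address, keeping the old per-pod m3db/m3coordinator targets around as comments. One way to confirm the job registered once Prometheus reloads its config — the Prometheus URL below is an assumption:

```bash
# Point PROM_URL at your prometheus service or NodePort.
PROM_URL="http://127.0.0.1:9090"
curl -s "${PROM_URL}/api/v1/targets" | grep -o '"job":"m3"' \
  && echo "m3 scrape target registered"
```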
diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/.helmignore b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/.helmignore
deleted file mode 100644
index 50af0317..00000000
--- a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/.helmignore
+++ /dev/null
@@ -1,22 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/NOTES.txt b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/NOTES.txt
deleted file mode 100644
index ee7ee3d7..00000000
--- a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/NOTES.txt
+++ /dev/null
@@ -1 +0,0 @@
-M3DB Cluster {{ .Values.m3dbCluster.name }} has been created
\ No newline at end of file
diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/_helpers.tpl b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/_helpers.tpl
deleted file mode 100644
index 36544b12..00000000
--- a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/_helpers.tpl
+++ /dev/null
@@ -1,32 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "m3db.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "m3db.fullname" -}}
-{{- if .Values.fullnameOverride -}}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- if contains $name .Release.Name -}}
-{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "m3db.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/configmap.yaml b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/configmap.yaml
deleted file mode 100644
index 358e201e..00000000
--- a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/configmap.yaml
+++ /dev/null
@@ -1,218 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ .Values.m3dbCluster.configMapName }}
-data:
- m3.yml: |+
- coordinator:
- listenAddress:
- type: "config"
- value: "0.0.0.0:7201"
- metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
- tagOptions:
- idScheme: quoted
- local:
- namespaces:
- - namespace: "default"
- type: unaggregated
- retention: 48h
- db:
- logging:
- level: info
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
- hostID:
- resolver: file
- file:
- path: /etc/m3db/pod-identity/identity
- timeout: 5m
-
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
- writeTimeout: 10s
- fetchTimeout: 15s
- connectTimeout: 20s
- writeRetry:
- initialBackoff: 500ms
- backoffFactor: 3
- maxRetries: 2
- jitter: true
- fetchRetry:
- initialBackoff: 500ms
- backoffFactor: 2
- maxRetries: 3
- jitter: true
- backgroundHealthCheckFailLimit: 4
- backgroundHealthCheckFailThrottleFactor: 0.5
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- bootstrap:
- bootstrappers:
- - filesystem
- - commitlog
- - peers
- - uninitialized_topology
- fs:
- numProcessorsPerCPU: 0.125
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
- blockSize: 10m
-
- fs:
- filePathPrefix: /var/lib/m3db
- writeBufferSize: 65536
- dataReadBufferSize: 65536
- infoReadBufferSize: 128
- seekReadBufferSize: 4096
- throughputLimitMbps: 100.0
- throughputCheckEvery: 128
-
- repair:
- enabled: false
- interval: 2h
- offset: 30m
- jitter: 1h
- throttle: 2m
- checkInterval: 1m
-
- pooling:
- blockAllocSize: 16
- type: simple
- seriesPool:
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- blockPool:
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- encoderPool:
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- closersPool:
- size: 104857
- lowWatermark: 0.7
- highWatermark: 1.0
- contextPool:
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- segmentReaderPool:
- size: 16384
- lowWatermark: 0.7
- highWatermark: 1.0
- iteratorPool:
- size: 2048
- lowWatermark: 0.7
- highWatermark: 1.0
- fetchBlockMetadataResultsPool:
- size: 65536
- capacity: 32
- lowWatermark: 0.7
- highWatermark: 1.0
- fetchBlocksMetadataResultsPool:
- size: 32
- capacity: 4096
- lowWatermark: 0.7
- highWatermark: 1.0
- hostBlockMetadataSlicePool:
- size: 131072
- capacity: 3
- lowWatermark: 0.7
- highWatermark: 1.0
- blockMetadataPool:
- size: 65536
- lowWatermark: 0.7
- highWatermark: 1.0
- blockMetadataSlicePool:
- size: 65536
- capacity: 32
- lowWatermark: 0.7
- highWatermark: 1.0
- blocksMetadataPool:
- size: 65536
- lowWatermark: 0.7
- highWatermark: 1.0
- blocksMetadataSlicePool:
- size: 32
- capacity: 4096
- lowWatermark: 0.7
- highWatermark: 1.0
- identifierPool:
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- bytesPool:
- buckets:
- - capacity: 16
- size: 524288
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 32
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 64
- size: 131072
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 128
- size: 65536
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 256
- size: 65536
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 1440
- size: 16384
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 4096
- size: 8192
- lowWatermark: 0.7
- highWatermark: 1.0
- config:
- service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - http://etcd-0.etcd:2380
- - http://etcd-1.etcd:2380
- http://etcd-2.etcd:2380
\ No newline at end of file
diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/etcd-cluster.yaml b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/etcd-cluster.yaml
deleted file mode 100644
index 802354bf..00000000
--- a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/etcd-cluster.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-apiVersion: "etcd.database.coreos.com/v1beta2"
-kind: EtcdCluster
-metadata:
- name: {{ .Release.Name }}-{{ .Values.etcdCluster.name }}
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "m3db.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
- annotations:
- etcd.database.coreos.com/scope: clusterwide
-spec:
- size: {{ .Values.etcdCluster.size }}
- version: "{{ .Values.etcdCluster.version }}"
- pod:
-{{ toYaml .Values.etcdCluster.pod | indent 4 }}
- {{- if .Values.etcdCluster.enableTLS }}
- TLS:
-{{ toYaml .Values.etcdCluster.tls | indent 4 }}
- {{- end }}
diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/values.yaml b/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/values.yaml
deleted file mode 100644
index 515fd2ad..00000000
--- a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/values.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-m3dbCluster:
- name: m3db-cluster
- image:
- repository: quay.io/m3db/m3dbnode
- tag: v0.10.2
- replicationFactor: 3
- numberOfShards: 256
- isolationGroups:
- - name: us-west1-a
- numInstances: 1
- - name: us-west1-b
- numInstances: 1
- - name: us-west1-c
- numInstances: 1
- namespaces:
- - name: collectd
- preset: 10s:2d
- configMapName: m3-configuration
- tolerations: {}
-
-etcdCluster:
- name: etcd
- size: 3
- version: 3.4.3
- image:
- repository: quay.io/coreos/etcd
- tag: v3.4.3
- pullPolicy: Always
- enableTLS: false
- # TLS configs
- tls:
- static:
- member:
- peerSecret: etcd-peer-tls
- serverSecret: etcd-server-tls
- operatorSecret: etcd-client-tls
- ## etcd cluster pod specific values
- ## Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-members-cluster-with-resource-requirement
- pod:
- ## Antiaffinity for etcd pod assignment
- ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
- antiAffinity: false
- resources:
- limits:
- cpu: 100m
- memory: 128Mi
- requests:
- cpu: 100m
- memory: 128Mi
- ## Node labels for etcd pod assignment
- ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
- nodeSelector: {}
diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/Chart.yaml b/kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/Chart.yaml
index 10d9d542..10d9d542 100644
--- a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/Chart.yaml
+++ b/kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/Chart.yaml
diff --git a/kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/del.yaml b/kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/del.yaml
new file mode 100644
index 00000000..86317508
--- /dev/null
+++ b/kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/del.yaml
@@ -0,0 +1,49 @@
+---
+# Source: m3db/templates/m3dbcluster.yaml
+apiVersion: operator.m3db.io/v1alpha1
+kind: M3DBCluster
+metadata:
+ name: m3db-cluster
+spec:
+ image: quay.io/m3db/m3dbnode:latest
+ replicationFactor: 3
+ numberOfShards: 256
+ isolationGroups:
+ - name: us-west1-a
+ nodeAffinityTerms:
+ - key: failure-domain.beta.kubernetes.io/region
+ values:
+ - us-west1-a
+ numInstances: 1
+ - name: us-west1-b
+ nodeAffinityTerms:
+ - key: failure-domain.beta.kubernetes.io/region
+ values:
+ - us-west1-b
+ numInstances: 1
+ - name: us-west1-c
+ nodeAffinityTerms:
+ - key: failure-domain.beta.kubernetes.io/region
+ values:
+ - us-west1-c
+ numInstances: 1
+
+ tolerations:
+ {}
+
+ namespaces:
+ - name: collectd
+ preset: 10s:2d
+
+ etcdEndpoints:
+ - http://etcd-0.etcd:2379
+ - http://etcd-1.etcd:2379
+ - http://etcd-2.etcd:2379
+ containerResources:
+ requests:
+ memory: 4Gi
+ cpu: '1'
+ limits:
+ memory: 32Gi
+ cpu: '4'
+
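
del.yaml is a pre-rendered copy of the M3DBCluster manifest; going by the name, it is presumably kept around for manual cleanup outside the orchestrator flow (an assumption — the patch does not say). If so, usage would look like:

```bash
# Hypothetical usage: remove (or re-create) the cluster by hand.
kubectl delete -f kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/del.yaml
kubectl apply  -f kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/del.yaml
```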
diff --git a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/m3dbcluster.yaml b/kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/templates/m3dbcluster.yaml
index 8ce16a74..c5da0307 100644
--- a/kud/tests/vnfs/comp-app/collection/app3/helm/m3db/templates/m3dbcluster.yaml
+++ b/kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/templates/m3dbcluster.yaml
@@ -1,26 +1,23 @@
apiVersion: operator.m3db.io/v1alpha1
kind: M3DBCluster
metadata:
- name: {{ .Values.m3dbCluster.name }}
+ name: m3db-cluster
spec:
image: {{ .Values.m3dbCluster.image.repository }}:{{ .Values.m3dbCluster.image.tag }}
replicationFactor: {{ .Values.m3dbCluster.replicationFactor }}
numberOfShards: {{ .Values.m3dbCluster.numberOfShards }}
- etcdEndpoints:
- - http://{{ .Release.Name }}-{{ .Values.etcdCluster.name }}:2379
isolationGroups:
{{ toYaml .Values.m3dbCluster.isolationGroups | indent 4 }}
- tolerations:
-{{ toYaml .Values.m3dbCluster.tolerations | indent 4 }}
namespaces:
-{{ toYaml .Values.m3dbCluster.namespaces | indent 4 }}
- configMapName: {{ .Values.m3dbCluster.configMapName }}
- resources:
+{{ toYaml .Values.m3dbCluster.namespaces | indent 4 }}
+ etcdEndpoints:
+ - http://etcd-0.etcd:2379
+ - http://etcd-1.etcd:2379
+ - http://etcd-2.etcd:2379
+ containerResources:
requests:
memory: 4Gi
cpu: '1'
limits:
- memory: 12Gi
+ memory: 32Gi
cpu: '4'
-
-
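
The template now hard-codes the three etcd endpoints, drops the configMapName and tolerations wiring, and renames resources to containerResources to match the newer operator CRD. Rendering the chart locally is a cheap way to confirm the output matches del.yaml above (Helm 3 syntax assumed):

```bash
# Render without installing and inspect the generated M3DBCluster spec.
helm template m3db kud/tests/vnfs/comp-app/collection/m3db/helm/m3db \
  | grep -B1 -A4 etcdEndpoints
```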
diff --git a/kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/values.yaml b/kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/values.yaml
new file mode 100644
index 00000000..faa2a8b2
--- /dev/null
+++ b/kud/tests/vnfs/comp-app/collection/m3db/helm/m3db/values.yaml
@@ -0,0 +1,29 @@
+m3dbCluster:
+ name: m3db-cluster
+ image:
+ repository: quay.io/m3db/m3dbnode
+ tag: latest
+ replicationFactor: 3
+ numberOfShards: 256
+ isolationGroups:
+ - name: us-west1-a
+ numInstances: 1
+ nodeAffinityTerms:
+ - key: failure-domain.beta.kubernetes.io/region
+ values:
+ - us-west1-a
+ - name: us-west1-b
+ numInstances: 1
+ nodeAffinityTerms:
+ - key: failure-domain.beta.kubernetes.io/region
+ values:
+ - us-west1-b
+ - name: us-west1-c
+ numInstances: 1
+ nodeAffinityTerms:
+ - key: failure-domain.beta.kubernetes.io/region
+ values:
+ - us-west1-c
+ namespaces:
+ - name: collectd
+ preset: 10s:2d
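
The nodeAffinityTerms above only match nodes carrying the corresponding region label, so on a bare-metal KUD cluster without cloud-provider labels they have to be applied by hand; the node names below are hypothetical:

```bash
# Label three workers so each isolation group has a schedulable node.
kubectl label node worker-0 failure-domain.beta.kubernetes.io/region=us-west1-a
kubectl label node worker-1 failure-domain.beta.kubernetes.io/region=us-west1-b
kubectl label node worker-2 failure-domain.beta.kubernetes.io/region=us-west1-c
```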
diff --git a/kud/tests/vnfs/comp-app/collection/app3/profile/manifest.yaml b/kud/tests/vnfs/comp-app/collection/m3db/profile/manifest.yaml
index 4d381d02..4d381d02 100644
--- a/kud/tests/vnfs/comp-app/collection/app3/profile/manifest.yaml
+++ b/kud/tests/vnfs/comp-app/collection/m3db/profile/manifest.yaml
diff --git a/kud/tests/vnfs/comp-app/collection/app3/profile/override_values.yaml b/kud/tests/vnfs/comp-app/collection/m3db/profile/override_values.yaml
index 041fc40d..041fc40d 100644
--- a/kud/tests/vnfs/comp-app/collection/app3/profile/override_values.yaml
+++ b/kud/tests/vnfs/comp-app/collection/m3db/profile/override_values.yaml
diff --git a/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/Chart.yaml b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/Chart.yaml
new file mode 100644
index 00000000..67ea549e
--- /dev/null
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/Chart.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+appVersion: '>0.4.7'
+description: Kubernetes operator for M3DB timeseries database
+home: https://github.com/m3db/m3db-operator
+icon: https://raw.githubusercontent.com/m3db/m3/master/docs/theme/assets/images/M3-logo.png
+keywords:
+- operator
+- m3
+maintainers:
+- email: m3db@googlegroups.com
+ name: m3 Authors
+ url: https://operator.m3db.io/
+name: m3db-operator
+sources:
+- https://github.com/m3db/m3db-operator
+version: 0.8.0
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/LICENSE b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/LICENSE
index 261eeb9e..261eeb9e 100644
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/LICENSE
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/LICENSE
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/NOTES.txt b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/NOTES.txt
index ca4143db..ca4143db 100644
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/NOTES.txt
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/NOTES.txt
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/README.md b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/README.md
index 0a532d31..0a532d31 100644
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/README.md
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/README.md
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/cluster_role.yaml b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/cluster_role.yaml
index 7bf41739..5c000f98 100644
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/cluster_role.yaml
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/cluster_role.yaml
@@ -20,7 +20,7 @@ rules:
verbs: ["create", "get", "deletecollection", "delete"]
- apiGroups: [""]
resources: ["pods"]
- verbs: ["list", "get", "watch", "update"]
+ verbs: ["list", "get", "watch", "update", "patch"]
- apiGroups: ["apps"]
resources: ["statefulsets", "deployments"]
verbs: ["*"]
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/cluster_role_binding.yaml b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/cluster_role_binding.yaml
index 876a6705..876a6705 100644
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/cluster_role_binding.yaml
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/cluster_role_binding.yaml
diff --git a/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/etcd-cluster/etcd-basic.yaml b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/etcd-cluster/etcd-basic.yaml
new file mode 100644
index 00000000..485dd1db
--- /dev/null
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/etcd-cluster/etcd-basic.yaml
@@ -0,0 +1,86 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: etcd
+ labels:
+ app: etcd
+spec:
+ ports:
+ - port: 2379
+ name: client
+ - port: 2380
+ name: peer
+ clusterIP: None
+ selector:
+ app: etcd
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: etcd-cluster
+ labels:
+ app: etcd
+spec:
+ selector:
+ app: etcd
+ ports:
+ - port: 2379
+ protocol: TCP
+ type: ClusterIP
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: etcd
+ labels:
+ app: etcd
+spec:
+ serviceName: "etcd"
+ replicas: 3
+ selector:
+ matchLabels:
+ app: etcd
+ template:
+ metadata:
+ labels:
+ app: etcd
+ spec:
+ containers:
+ - name: etcd
+ image: quay.io/coreos/etcd:v3.3.10
+ command:
+ - "etcd"
+ - "--name"
+ - "$(MY_POD_NAME)"
+ - "--listen-peer-urls"
+ - "http://$(MY_IP):2380"
+ - "--listen-client-urls"
+ - "http://$(MY_IP):2379,http://127.0.0.1:2379"
+ - "--advertise-client-urls"
+ - "http://$(MY_POD_NAME).etcd:2379"
+ - "--initial-cluster-token"
+ - "etcd-cluster-1"
+ - "--initial-advertise-peer-urls"
+ - "http://$(MY_POD_NAME).etcd:2380"
+ - "--initial-cluster"
+ - "etcd-0=http://etcd-0.etcd:2380,etcd-1=http://etcd-1.etcd:2380,etcd-2=http://etcd-2.etcd:2380"
+ - "--initial-cluster-state"
+ - "new"
+ - "--data-dir"
+ - "/var/lib/etcd"
+ ports:
+ - containerPort: 2379
+ name: client
+ - containerPort: 2380
+ name: peer
+ env:
+ - name: MY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: ETCDCTL_API
+ value: "3"
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/service_account.yaml b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/service_account.yaml
index a65e90bc..a65e90bc 100644
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/service_account.yaml
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/service_account.yaml
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/stateful_set.yaml b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/stateful_set.yaml
index d1002378..e4ed3366 100644
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/templates/stateful_set.yaml
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/templates/stateful_set.yaml
@@ -14,6 +14,10 @@ spec:
labels:
name: {{ .Values.operator.name }}
spec:
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 65534
+ runAsGroup: 65534
containers:
- name: {{ .Values.operator.name }}
image: {{ .Values.image.repository}}:{{ .Values.image.tag }}
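
The operator pod now runs as nobody (65534) with runAsNonRoot enforced. One way to spot-check the effective security context after deployment; the label selector follows the chart's name label:

```bash
# Prints the pod-level securityContext; expect runAsUser/runAsGroup 65534.
kubectl get pod -l name=m3db-operator \
  -o jsonpath='{.items[0].spec.securityContext}'
```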
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/values.yaml b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/values.yaml
index 8411d77e..74012cb0 100644
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/values.yaml
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/helm/operator/values.yaml
@@ -2,5 +2,5 @@ operator:
name: m3db-operator
image:
repository: quay.io/m3db/m3db-operator
- tag: v0.1.3
+ tag: v0.8.0
environment: production
diff --git a/kud/tests/vnfs/comp-app/collection/operators/profile/manifest.yaml b/kud/tests/vnfs/comp-app/collection/operators-latest/profile/manifest.yaml
index 4d381d02..4d381d02 100644
--- a/kud/tests/vnfs/comp-app/collection/operators/profile/manifest.yaml
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/profile/manifest.yaml
diff --git a/kud/tests/vnfs/comp-app/collection/operators/profile/override_values.yaml b/kud/tests/vnfs/comp-app/collection/operators-latest/profile/override_values.yaml
index 041fc40d..041fc40d 100644
--- a/kud/tests/vnfs/comp-app/collection/operators/profile/override_values.yaml
+++ b/kud/tests/vnfs/comp-app/collection/operators-latest/profile/override_values.yaml
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/.helmignore b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/.helmignore
deleted file mode 100644
index 50af0317..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/.helmignore
+++ /dev/null
@@ -1,22 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/Chart.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/Chart.yaml
deleted file mode 100644
index 01c1eb03..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/Chart.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: v1
-appVersion: "1.0"
-description: A collection of operator Helm charts.
-name: operator
-version: 0.1.0
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/.helmignore b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/.helmignore
deleted file mode 100644
index f0c13194..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/.helmignore
+++ /dev/null
@@ -1,21 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/Chart.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/Chart.yaml
deleted file mode 100755
index d0ea8910..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/Chart.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: v1
-description: CoreOS etcd-operator Helm chart for Kubernetes
-name: etcd-operator
-version: 0.10.0
-appVersion: 0.9.4
-home: https://github.com/coreos/etcd-operator
-icon: https://raw.githubusercontent.com/coreos/etcd/master/logos/etcd-horizontal-color.png
-sources:
-- https://github.com/coreos/etcd-operator
-maintainers:
-- name: lachie83
- email: lachlan@deis.com
-- name: alejandroEsc
- email: jaescobar.cell@gmail.com
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/OWNERS b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/OWNERS
deleted file mode 100644
index 1385151c..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-approvers:
-- lachie83
-- alejandroEsc
-reviewers:
-- lachie83
-- alejandroEsc
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/README.md b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/README.md
deleted file mode 100644
index 417b19b4..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/README.md
+++ /dev/null
@@ -1,169 +0,0 @@
-# CoreOS etcd-operator
-
-[etcd-operator](https://coreos.com/blog/introducing-the-etcd-operator.html) Simplify etcd cluster
-configuration and management.
-
-__DISCLAIMER:__ While this chart has been well-tested, the etcd-operator is still currently in beta.
-Current project status is available [here](https://github.com/coreos/etcd-operator).
-
-## Introduction
-
-This chart bootstraps an etcd-operator and allows the deployment of etcd-cluster(s).
-
-## Official Documentation
-
-Official project documentation found [here](https://github.com/coreos/etcd-operator)
-
-## Prerequisites
-
-- Kubernetes 1.4+ with Beta APIs enabled
-- __Suggested:__ PV provisioner support in the underlying infrastructure to support backups
-
-## Installing the Chart
-
-To install the chart with the release name `my-release`:
-
-```bash
-$ helm install stable/etcd-operator --name my-release
-```
-
-Note that by default chart installs etcd operator only. If you want to also deploy `etcd` cluster, enable `customResources.createEtcdClusterCRD` flag:
-```bash
-$ helm install --name my-release --set customResources.createEtcdClusterCRD=true stable/etcd-operator
-```
-
-## Uninstalling the Chart
-
-To uninstall/delete the `my-release` deployment:
-
-```bash
-$ helm delete my-release
-```
-
-The command removes all the Kubernetes components EXCEPT the persistent volume.
-
-## Updating
-Once you have a new chart version, you can update your deployment with:
-```
-$ helm upgrade my-release stable/etcd-operator
-```
-
-Example resizing etcd cluster from `3` to `5` nodes during helm upgrade:
-```bash
-$ helm upgrade my-release --set etcdCluster.size=5 --set customResources.createEtcdClusterCRD=true stable/etcd-operator
-```
-
-## Configuration
-
-The following table lists the configurable parameters of the etcd-operator chart and their default values.
-
-| Parameter | Description | Default |
-| ------------------------------------------------- | -------------------------------------------------------------------- | ---------------------------------------------- |
-| `rbac.create` | Install required RBAC service account, roles and rolebindings | `true` |
-| `rbac.apiVersion` | RBAC api version `v1alpha1\|v1beta1` | `v1beta1` |
-| `serviceAccount.create` | Flag to create the service account | `true` |
-| `serviceAccount.name` | Name of the service account resource when RBAC is enabled | `etcd-operator-sa` |
-| `deployments.etcdOperator` | Deploy the etcd cluster operator | `true` |
-| `deployments.backupOperator` | Deploy the etcd backup operator | `true` |
-| `deployments.restoreOperator` | Deploy the etcd restore operator | `true` |
-| `customResources.createEtcdClusterCRD` | Create a custom resource: EtcdCluster | `false` |
-| `customResources.createBackupCRD` | Create an a custom resource: EtcdBackup | `false` |
-| `customResources.createRestoreCRD` | Create an a custom resource: EtcdRestore | `false` |
-| `etcdOperator.name` | Etcd Operator name | `etcd-operator` |
-| `etcdOperator.replicaCount` | Number of operator replicas to create (only 1 is supported) | `1` |
-| `etcdOperator.image.repository` | etcd-operator container image | `quay.io/coreos/etcd-operator` |
-| `etcdOperator.image.tag` | etcd-operator container image tag | `v0.9.3` |
-| `etcdOperator.image.pullpolicy` | etcd-operator container image pull policy | `Always` |
-| `etcdOperator.resources.cpu` | CPU limit per etcd-operator pod | `100m` |
-| `etcdOperator.resources.memory` | Memory limit per etcd-operator pod | `128Mi` |
-| `etcdOperator.securityContext` | SecurityContext for etcd operator | `{}` |
-| `etcdOperator.nodeSelector` | Node labels for etcd operator pod assignment | `{}` |
-| `etcdOperator.podAnnotations` | Annotations for the etcd operator pod | `{}` |
-| `etcdOperator.commandArgs` | Additional command arguments | `{}` |
-| `backupOperator.name` | Backup operator name | `etcd-backup-operator` |
-| `backupOperator.replicaCount` | Number of operator replicas to create (only 1 is supported) | `1` |
-| `backupOperator.image.repository` | Operator container image | `quay.io/coreos/etcd-operator` |
-| `backupOperator.image.tag` | Operator container image tag | `v0.9.3` |
-| `backupOperator.image.pullpolicy` | Operator container image pull policy | `Always` |
-| `backupOperator.resources.cpu` | CPU limit per etcd-operator pod | `100m` |
-| `backupOperator.resources.memory` | Memory limit per etcd-operator pod | `128Mi` |
-| `backupOperator.securityContext` | SecurityContext for etcd backup operator | `{}` |
-| `backupOperator.spec.storageType` | Storage to use for backup file, currently only S3 supported | `S3` |
-| `backupOperator.spec.s3.s3Bucket` | Bucket in S3 to store backup file | |
-| `backupOperator.spec.s3.awsSecret` | Name of kubernetes secret containing aws credentials | |
-| `backupOperator.nodeSelector` | Node labels for etcd operator pod assignment | `{}` |
-| `backupOperator.commandArgs` | Additional command arguments | `{}` |
-| `restoreOperator.name` | Restore operator name | `etcd-backup-operator` |
-| `restoreOperator.replicaCount` | Number of operator replicas to create (only 1 is supported) | `1` |
-| `restoreOperator.image.repository` | Operator container image | `quay.io/coreos/etcd-operator` |
-| `restoreOperator.image.tag` | Operator container image tag | `v0.9.3` |
-| `restoreOperator.image.pullpolicy` | Operator container image pull policy | `Always` |
-| `restoreOperator.resources.cpu` | CPU limit per etcd-operator pod | `100m` |
-| `restoreOperator.resources.memory` | Memory limit per etcd-operator pod | `128Mi` |
-| `restoreOperator.securityContext` | SecurityContext for etcd restore operator | `{}` |
-| `restoreOperator.spec.s3.path` | Path in S3 bucket containing the backup file | |
-| `restoreOperator.spec.s3.awsSecret` | Name of kubernetes secret containing aws credentials | |
-| `restoreOperator.nodeSelector` | Node labels for etcd operator pod assignment | `{}` |
-| `restoreOperator.commandArgs` | Additional command arguments | `{}` |
-| `etcdCluster.name` | etcd cluster name | `etcd-cluster` |
-| `etcdCluster.size` | etcd cluster size | `3` |
-| `etcdCluster.version` | etcd cluster version | `3.2.25` |
-| `etcdCluster.image.repository` | etcd container image | `quay.io/coreos/etcd-operator` |
-| `etcdCluster.image.tag` | etcd container image tag | `v3.2.25` |
-| `etcdCluster.image.pullPolicy` | etcd container image pull policy | `Always` |
-| `etcdCluster.enableTLS` | Enable use of TLS | `false` |
-| `etcdCluster.tls.static.member.peerSecret` | Kubernetes secret containing TLS peer certs | `etcd-peer-tls` |
-| `etcdCluster.tls.static.member.serverSecret` | Kubernetes secret containing TLS server certs | `etcd-server-tls` |
-| `etcdCluster.tls.static.operatorSecret` | Kubernetes secret containing TLS client certs | `etcd-client-tls` |
-| `etcdCluster.pod.antiAffinity` | Whether etcd cluster pods should have an antiAffinity | `false` |
-| `etcdCluster.pod.resources.limits.cpu` | CPU limit per etcd cluster pod | `100m` |
-| `etcdCluster.pod.resources.limits.memory` | Memory limit per etcd cluster pod | `128Mi` |
-| `etcdCluster.pod.resources.requests.cpu` | CPU request per etcd cluster pod | `100m` |
-| `etcdCluster.pod.resources.requests.memory` | Memory request per etcd cluster pod | `128Mi` |
-| `etcdCluster.pod.nodeSelector` | Node labels for etcd cluster pod assignment | `{}` |
-
-Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example:
-
-```bash
-$ helm install --name my-release --set image.tag=v0.2.1 stable/etcd-operator
-```
-
-Alternatively, a YAML file that specifies the values for the parameters can be provided while
-installing the chart. For example:
-
-```bash
-$ helm install --name my-release --values values.yaml stable/etcd-operator
-```
-
-## RBAC
-By default the chart will install the recommended RBAC roles and rolebindings.
-
-To determine if your cluster supports this running the following:
-
-```console
-$ kubectl api-versions | grep rbac
-```
-
-You also need to have the following parameter on the api server. See the following document for how to enable [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)
-
-```
---authorization-mode=RBAC
-```
-
-If the output contains "beta" or both "alpha" and "beta" you can may install rbac by default, if not, you may turn RBAC off as described below.
-
-### RBAC role/rolebinding creation
-
-RBAC resources are enabled by default. To disable RBAC do the following:
-
-```console
-$ helm install --name my-release stable/etcd-operator --set rbac.create=false
-```
-
-### Changing RBAC manifest apiVersion
-
-By default the RBAC resources are generated with the "v1beta1" apiVersion. To use "v1alpha1" do the following:
-
-```console
-$ helm install --name my-release stable/etcd-operator --set rbac.install=true,rbac.apiVersion=v1alpha1
-```
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/NOTES.txt b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/NOTES.txt
deleted file mode 100644
index 30d7ec0f..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/NOTES.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-{{- if .Values.customResources.createEtcdClusterCRD -}}
-1. Watch etcd cluster start
- kubectl get pods -l etcd_cluster={{ .Values.etcdCluster.name }} --namespace {{ .Release.Namespace }} -w
-
-2. Confirm etcd cluster is healthy
- $ kubectl run --rm -i --tty --env="ETCDCTL_API=3" --env="ETCDCTL_ENDPOINTS=http://{{ .Values.etcdCluster.name }}-client:2379" --namespace {{ .Release.Namespace }} etcd-test --image quay.io/coreos/etcd --restart=Never -- /bin/sh -c 'watch -n1 "etcdctl member list"'
-
-3. Interact with the cluster!
- $ kubectl run --rm -i --tty --env ETCDCTL_API=3 --namespace {{ .Release.Namespace }} etcd-test --image quay.io/coreos/etcd --restart=Never -- /bin/sh
- / # etcdctl --endpoints http://{{ .Values.etcdCluster.name }}-client:2379 put foo bar
- / # etcdctl --endpoints http://{{ .Values.etcdCluster.name }}-client:2379 get foo
- OK
- (ctrl-D to exit)
-
-4. Optional
- Check the etcd-operator logs
- export POD=$(kubectl get pods -l app={{ template "etcd-operator.fullname" . }} --namespace {{ .Release.Namespace }} --output name)
- kubectl logs $POD --namespace={{ .Release.Namespace }}
-
-{{- else -}}
-1. etcd-operator deployed.
- If you would like to deploy an etcd-cluster set 'customResources.createEtcdClusterCRD' to true in values.yaml
- Check the etcd-operator logs
- export POD=$(kubectl get pods -l app={{ template "etcd-operator.fullname" . }} --namespace {{ .Release.Namespace }} --output name)
- kubectl logs $POD --namespace={{ .Release.Namespace }}
-
-{{- end -}}
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/_helpers.tpl b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/_helpers.tpl
deleted file mode 100644
index e4076835..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/_helpers.tpl
+++ /dev/null
@@ -1,53 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "etcd-operator.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-*/}}
-{{- define "etcd-operator.fullname" -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- printf "%s-%s-%s" .Release.Name $name .Values.etcdOperator.name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{- define "etcd-backup-operator.name" -}}
-{{- default .Chart.Name .Values.backupOperator.name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-*/}}
-{{- define "etcd-backup-operator.fullname" -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- printf "%s-%s-%s" .Release.Name $name .Values.backupOperator.name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{- define "etcd-restore-operator.name" -}}
-{{- default .Chart.Name .Values.restoreOperator.name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-*/}}
-{{- define "etcd-restore-operator.fullname" -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- printf "%s-%s-%s" .Release.Name $name .Values.restoreOperator.name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create the name of the etcd-operator service account to use
-*/}}
-{{- define "etcd-operator.serviceAccountName" -}}
-{{- if .Values.serviceAccount.create -}}
- {{ default (include "etcd-operator.fullname" .) .Values.serviceAccount.name }}
-{{- else -}}
- {{ default "default" .Values.serviceAccount.name }}
-{{- end -}}
-{{- end -}}
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/backup-etcd-crd.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/backup-etcd-crd.yaml
deleted file mode 100644
index 5528f766..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/backup-etcd-crd.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-{{- if .Values.customResources.createBackupCRD }}
----
-apiVersion: "etcd.database.coreos.com/v1beta2"
-kind: "EtcdBackup"
-metadata:
- name: {{ template "etcd-backup-operator.fullname" . }}
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "etcd-backup-operator.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
- annotations:
- "helm.sh/hook": "post-install"
- "helm.sh/hook-delete-policy": "before-hook-creation"
-spec:
- clusterName: {{ .Values.etcdCluster.name }}
-{{ toYaml .Values.backupOperator.spec | indent 2 }}
-{{- end}}
\ No newline at end of file
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/backup-operator-deployment.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/backup-operator-deployment.yaml
deleted file mode 100644
index 8b8d51b0..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/backup-operator-deployment.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-{{- if .Values.deployments.backupOperator }}
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ template "etcd-backup-operator.fullname" . }}
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "etcd-backup-operator.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-spec:
- selector:
- matchLabels:
- app: {{ template "etcd-backup-operator.fullname" . }}
- release: {{ .Release.Name }}
- replicas: {{ .Values.backupOperator.replicaCount }}
- template:
- metadata:
- name: {{ template "etcd-backup-operator.fullname" . }}
- labels:
- app: {{ template "etcd-backup-operator.fullname" . }}
- release: {{ .Release.Name }}
- spec:
- serviceAccountName: {{ template "etcd-operator.serviceAccountName" . }}
- containers:
- - name: {{ .Values.backupOperator.name }}
- image: "{{ .Values.backupOperator.image.repository }}:{{ .Values.backupOperator.image.tag }}"
- imagePullPolicy: {{ .Values.backupOperator.image.pullPolicy }}
- command:
- - etcd-backup-operator
-{{- range $key, $value := .Values.backupOperator.commandArgs }}
- - "--{{ $key }}={{ $value }}"
-{{- end }}
- env:
- - name: MY_POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: MY_POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- resources:
- limits:
- cpu: {{ .Values.backupOperator.resources.cpu }}
- memory: {{ .Values.backupOperator.resources.memory }}
- requests:
- cpu: {{ .Values.backupOperator.resources.cpu }}
- memory: {{ .Values.backupOperator.resources.memory }}
- {{- if .Values.backupOperator.nodeSelector }}
- nodeSelector: {{ toYaml .Values.backupOperator.nodeSelector | nindent 8 }}
- {{- end }}
- {{- if .Values.backupOperator.securityContext }}
- securityContext: {{ toYaml .Values.backupOperator.securityContext | nindent 8 }}
- {{- end }}
- {{- if .Values.backupOperator.tolerations }}
- tolerations: {{ toYaml .Values.backupOperator.tolerations | nindent 8 }}
- {{- end }}
-{{- end }}
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/etcd-cluster-crd.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/etcd-cluster-crd.yaml
deleted file mode 100644
index 2cccb2b2..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/etcd-cluster-crd.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-{{- if .Values.deployments.etcdOperator }}
-# Synced with https://github.com/coreos/etcd-operator/blob/master/pkg/util/k8sutil/crd.go
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: etcdclusters.etcd.database.coreos.com
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "etcd-operator.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
- annotations:
- helm.sh/hook: crd-install
- helm.sh/hook-delete-policy: before-hook-creation
-spec:
- group: etcd.database.coreos.com
- scope: Namespaced
- version: v1beta2
- names:
- kind: EtcdCluster
- listKind: EtcdClusterList
- singular: etcdcluster
- plural: etcdclusters
- shortNames:
- - etcd
-{{- end }}
-{{- if .Values.customResources.createEtcdClusterCRD }}
----
-apiVersion: "etcd.database.coreos.com/v1beta2"
-kind: "EtcdCluster"
-metadata:
- name: {{ .Values.etcdCluster.name }}
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "etcd-operator.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
- annotations:
- "helm.sh/hook": "post-install"
- "helm.sh/hook-delete-policy": "before-hook-creation"
-spec:
- size: {{ .Values.etcdCluster.size }}
- version: "{{ .Values.etcdCluster.version }}"
- pod:
-{{ toYaml .Values.etcdCluster.pod | indent 4 }}
- {{- if .Values.etcdCluster.enableTLS }}
- TLS:
-{{ toYaml .Values.etcdCluster.tls | indent 4 }}
- {{- end }}
-{{- end }}
\ No newline at end of file
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-cluster-role.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-cluster-role.yaml
deleted file mode 100644
index 62085978..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-cluster-role.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-{{- if .Values.rbac.create }}
----
-apiVersion: rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }}
-kind: ClusterRole
-metadata:
- name: {{ template "etcd-operator.fullname" . }}
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "etcd-operator.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-rules:
-- apiGroups:
- - etcd.database.coreos.com
- resources:
- - etcdclusters
- - etcdbackups
- - etcdrestores
- verbs:
- - "*"
-- apiGroups:
- - apiextensions.k8s.io
- resources:
- - customresourcedefinitions
- verbs:
- - "*"
-- apiGroups:
- - ""
- resources:
- - pods
- - services
- - endpoints
- - persistentvolumeclaims
- - events
- verbs:
- - "*"
-- apiGroups:
- - apps
- resources:
- - deployments
- verbs:
- - "*"
-- apiGroups:
- - ""
- resources:
- - secrets
- verbs:
- - get
-{{- end }}
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-clusterrole-binding.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-clusterrole-binding.yaml
deleted file mode 100644
index 09594ccc..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-clusterrole-binding.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-{{- if and .Values.rbac.create .Values.deployments.etcdOperator }}
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/{{ required "A valid .Values.rbac.apiVersion entry required!" .Values.rbac.apiVersion }}
-metadata:
- name: {{ template "etcd-operator.fullname" . }}
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "etcd-operator.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-subjects:
-- kind: ServiceAccount
- name: {{ template "etcd-operator.serviceAccountName" . }}
- namespace: {{ .Release.Namespace }}
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: {{ template "etcd-operator.fullname" . }}
-{{- end }}
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-deployment.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-deployment.yaml
deleted file mode 100644
index dc50d46e..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-deployment.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
-{{- if .Values.deployments.etcdOperator }}
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ template "etcd-operator.fullname" . }}
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "etcd-operator.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-spec:
- selector:
- matchLabels:
- app: {{ template "etcd-operator.fullname" . }}
- release: {{ .Release.Name }}
- replicas: {{ .Values.etcdOperator.replicaCount }}
- template:
- metadata:
- name: {{ template "etcd-operator.fullname" . }}
- labels:
- app: {{ template "etcd-operator.fullname" . }}
- release: {{ .Release.Name }}
- annotations: {{ toYaml .Values.etcdOperator.podAnnotations | nindent 8}}
- spec:
- serviceAccountName: {{ template "etcd-operator.serviceAccountName" . }}
- containers:
- - name: {{ template "etcd-operator.fullname" . }}
- image: "{{ .Values.etcdOperator.image.repository }}:{{ .Values.etcdOperator.image.tag }}"
- imagePullPolicy: {{ .Values.etcdOperator.image.pullPolicy }}
- command:
- - etcd-operator
-{{- range $key, $value := .Values.etcdOperator.commandArgs }}
- - "--{{ $key }}={{ $value }}"
-{{- end }}
- env:
- - name: MY_POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: MY_POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- resources:
- limits:
- cpu: {{ .Values.etcdOperator.resources.cpu }}
- memory: {{ .Values.etcdOperator.resources.memory }}
- requests:
- cpu: {{ .Values.etcdOperator.resources.cpu }}
- memory: {{ .Values.etcdOperator.resources.memory }}
- {{- if .Values.etcdOperator.livenessProbe.enabled }}
- livenessProbe:
- httpGet:
- path: /readyz
- port: 8080
- initialDelaySeconds: {{ .Values.etcdOperator.livenessProbe.initialDelaySeconds }}
- periodSeconds: {{ .Values.etcdOperator.livenessProbe.periodSeconds }}
- timeoutSeconds: {{ .Values.etcdOperator.livenessProbe.timeoutSeconds }}
- successThreshold: {{ .Values.etcdOperator.livenessProbe.successThreshold }}
- failureThreshold: {{ .Values.etcdOperator.livenessProbe.failureThreshold }}
- {{- end}}
- {{- if .Values.etcdOperator.readinessProbe.enabled }}
- readinessProbe:
- httpGet:
- path: /readyz
- port: 8080
- initialDelaySeconds: {{ .Values.etcdOperator.readinessProbe.initialDelaySeconds }}
- periodSeconds: {{ .Values.etcdOperator.readinessProbe.periodSeconds }}
- timeoutSeconds: {{ .Values.etcdOperator.readinessProbe.timeoutSeconds }}
- successThreshold: {{ .Values.etcdOperator.readinessProbe.successThreshold }}
- failureThreshold: {{ .Values.etcdOperator.readinessProbe.failureThreshold }}
- {{- end }}
- {{- if .Values.etcdOperator.nodeSelector }}
- nodeSelector: {{ toYaml .Values.etcdOperator.nodeSelector | nindent 8 }}
- {{- end }}
- {{- if .Values.etcdOperator.securityContext }}
- securityContext: {{ toYaml .Values.etcdOperator.securityContext | nindent 8 }}
- {{- end }}
- {{- if .Values.etcdOperator.tolerations }}
- tolerations: {{ toYaml .Values.etcdOperator.tolerations | nindent 8 }}
- {{- end }}
-{{- end }}
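The commandArgs map in the deployment above renders one "--key=value" flag per entry. Assuming a hypothetical override such as etcdOperator.commandArgs: {analytics: "false", create-crd: "true"} (flag names illustrative; the chart's values comments themselves suggest analytics as an example), the container command would render as the sketch below. Helm iterates map keys in sorted order, so the flag order is deterministic.

    command:
    - etcd-operator
    - "--analytics=false"   # hypothetical flag from commandArgs override
    - "--create-crd=true"   # hypothetical flag from commandArgs override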
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-service-account.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-service-account.yaml
deleted file mode 100644
index 423be9c4..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/operator-service-account.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-{{- if and .Values.serviceAccount.create .Values.deployments.etcdOperator }}
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ template "etcd-operator.serviceAccountName" . }}
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "etcd-operator.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
-{{- end }}
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-etcd-crd.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-etcd-crd.yaml
deleted file mode 100644
index 73faaab8..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-etcd-crd.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-{{- if .Values.customResources.createRestoreCRD }}
----
-apiVersion: "etcd.database.coreos.com/v1beta2"
-kind: "EtcdRestore"
-metadata:
- # An EtcdCluster with the same name will be created
- name: {{ .Values.etcdCluster.name }}
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "etcd-restore-operator.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
- annotations:
- "helm.sh/hook": "post-install"
- "helm.sh/hook-delete-policy": "before-hook-creation"
-spec:
- clusterSpec:
- size: {{ .Values.etcdCluster.size }}
- baseImage: "{{ .Values.etcdCluster.image.repository }}"
- version: {{ .Values.etcdCluster.image.tag }}
- pod:
-{{ toYaml .Values.etcdCluster.pod | indent 6 }}
- {{- if .Values.etcdCluster.enableTLS }}
- TLS:
-{{ toYaml .Values.etcdCluster.tls | indent 6 }}
- {{- end }}
-{{ toYaml .Values.restoreOperator.spec | indent 2 }}
-{{- end}}
\ No newline at end of file
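With customResources.createRestoreCRD enabled, the defaults from this chart's values.yaml (size 3, image quay.io/coreos/etcd:v3.3.3) and the example S3 path documented there would yield an EtcdRestore roughly like this sketch (pod resources omitted; the path and Secret name are hypothetical placeholders):

    apiVersion: "etcd.database.coreos.com/v1beta2"
    kind: "EtcdRestore"
    metadata:
      name: etcd-cluster   # an EtcdCluster with the same name will be created
    spec:
      clusterSpec:
        size: 3
        baseImage: "quay.io/coreos/etcd"
        version: v3.3.3
      s3:
        # hypothetical backup path, following the documented <bucket>/<path> format
        path: "etcd-snapshot-bucket/v1/default/example-etcd-cluster/3.2.10_0000000000000001_etcd.backup"
        awsSecret: aws-credentials   # hypothetical Secret holding AWS credentials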
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-operator-deployment.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-operator-deployment.yaml
deleted file mode 100644
index ce27f6f8..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-operator-deployment.yaml
+++ /dev/null
@@ -1,64 +0,0 @@
-{{- if .Values.deployments.restoreOperator }}
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ template "etcd-restore-operator.fullname" . }}
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "etcd-restore-operator.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-spec:
- selector:
- matchLabels:
- app: {{ template "etcd-restore-operator.name" . }}
- release: {{ .Release.Name }}
- replicas: {{ .Values.restoreOperator.replicaCount }}
- template:
- metadata:
- name: {{ template "etcd-restore-operator.fullname" . }}
- labels:
- app: {{ template "etcd-restore-operator.name" . }}
- release: {{ .Release.Name }}
- spec:
- serviceAccountName: {{ template "etcd-operator.serviceAccountName" . }}
- containers:
- - name: {{ .Values.restoreOperator.name }}
- image: "{{ .Values.restoreOperator.image.repository }}:{{ .Values.restoreOperator.image.tag }}"
- imagePullPolicy: {{ .Values.restoreOperator.image.pullPolicy }}
- ports:
- - containerPort: {{ .Values.restoreOperator.port }}
- command:
- - etcd-restore-operator
-{{- range $key, $value := .Values.restoreOperator.commandArgs }}
- - "--{{ $key }}={{ $value }}"
-{{- end }}
- env:
- - name: MY_POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: MY_POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: SERVICE_ADDR
- value: "{{ .Values.restoreOperator.name }}:{{ .Values.restoreOperator.port }}"
- resources:
- limits:
- cpu: {{ .Values.restoreOperator.resources.cpu }}
- memory: {{ .Values.restoreOperator.resources.memory }}
- requests:
- cpu: {{ .Values.restoreOperator.resources.cpu }}
- memory: {{ .Values.restoreOperator.resources.memory }}
- {{- if .Values.restoreOperator.nodeSelector }}
- nodeSelector: {{ toYaml .Values.restoreOperator.nodeSelector | nindent 8 }}
- {{- end }}
- {{- if .Values.restoreOperator.securityContext }}
- securityContext: {{ toYaml .Values.restoreOperator.securityContext | nindent 8 }}
- {{- end }}
- {{- if .Values.restoreOperator.tolerations }}
- tolerations: {{ toYaml .Values.restoreOperator.tolerations | nindent 8 }}
- {{- end }}
-{{- end }}
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-operator-service.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-operator-service.yaml
deleted file mode 100644
index 052be364..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/templates/restore-operator-service.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-{{- if .Values.deployments.restoreOperator }}
----
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ .Values.restoreOperator.name }}
- labels:
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- app: {{ template "etcd-restore-operator.name" . }}
- heritage: {{ .Release.Service }}
- release: {{ .Release.Name }}
-spec:
- ports:
- - protocol: TCP
- name: http-etcd-restore-port
- port: {{ .Values.restoreOperator.port }}
- selector:
- app: {{ template "etcd-restore-operator.name" . }}
- release: {{ .Release.Name }}
-{{- end }}
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/values.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/values.yaml
deleted file mode 100644
index 90947807..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/etcd-operator/values.yaml
+++ /dev/null
@@ -1,162 +0,0 @@
-# Default values for etcd-operator.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-# Enable etcd-operator
-# To be used from parent operator package requirements.yaml
-enabled: true
-
-global:
- ## Reference to one or more secrets to be used when pulling images
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- ##
- imagePullSecrets: []
- # - name: "image-pull-secret"
-
-## Install Default RBAC roles and bindings
-rbac:
- create: true
- apiVersion: v1
-
-## Service account name and whether to create it
-serviceAccount:
- create: true
- name:
-
-# Select what to deploy
-deployments:
- etcdOperator: true
- # one-time deployment; delete once completed
- # Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/backup-operator.md
- backupOperator: false
- # one-time deployment; delete once completed
- # Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/restore-operator.md
- restoreOperator: false
-
-# Creates custom resources; not all are required.
-# You could instead run `helm template --values <values.yaml> --name release_name ...`
-# and create the resources yourself to deploy on your cluster later.
-customResources:
- createEtcdClusterCRD: true
- createBackupCRD: false
- createRestoreCRD: false
-
-# etcdOperator
-etcdOperator:
- name: etcd-operator
- replicaCount: 1
- image:
- repository: quay.io/coreos/etcd-operator
- tag: v0.9.4
- pullPolicy: Always
- resources:
- cpu: 100m
- memory: 128Mi
- ## Node labels for etcd-operator pod assignment
- ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
- nodeSelector: {}
- ## additional command arguments go here; will be translated to `--key=value` form
- ## e.g., analytics: true
- commandArgs: {}
- ## Configurable health checks against the /readyz endpoint that etcd-operator exposes
- readinessProbe:
- enabled: false
- initialDelaySeconds: 0
- periodSeconds: 10
- timeoutSeconds: 1
- successThreshold: 1
- failureThreshold: 3
- livenessProbe:
- enabled: false
- initialDelaySeconds: 0
- periodSeconds: 10
- timeoutSeconds: 1
- successThreshold: 1
- failureThreshold: 3
-# backup spec
-backupOperator:
- name: etcd-backup-operator
- replicaCount: 1
- image:
- repository: quay.io/coreos/etcd-operator
- tag: v0.9.4
- pullPolicy: Always
- resources:
- cpu: 100m
- memory: 128Mi
- spec:
- storageType: S3
- s3:
- s3Bucket:
- awsSecret:
- ## Node labels for etcd pod assignment
- ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
- nodeSelector: {}
- ## additional command arguments go here; will be translated to `--key=value` form
- ## e.g., analytics: true
- commandArgs: {}
- securityContext: {}
- tolerations: []
-
-# restore spec
-restoreOperator:
- name: etcd-restore-operator
- replicaCount: 1
- image:
- repository: quay.io/coreos/etcd-operator
- tag: v0.9.4
- pullPolicy: Always
- port: 19999
- resources:
- cpu: 100m
- memory: 128Mi
- spec:
- s3:
- # The format of "path" must be: "<s3-bucket-name>/<path-to-backup-file>"
- # e.g.: "etcd-snapshot-bucket/v1/default/example-etcd-cluster/3.2.10_0000000000000001_etcd.backup"
- path:
- awsSecret:
- ## Node labels for etcd pod assignment
- ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
- nodeSelector: {}
- ## additional command arguments go here; will be translated to `--key=value` form
- ## e.g., analytics: true
- commandArgs: {}
- securityContext: {}
- tolerations: []
-
-## etcd-cluster specific values
-etcdCluster:
- name: etcd-cluster
- size: 3
- version: 3.3.3
- image:
- repository: quay.io/coreos/etcd
- tag: v3.3.3
- pullPolicy: Always
- enableTLS: false
- # TLS configs
- tls:
- static:
- member:
- peerSecret: etcd-peer-tls
- serverSecret: etcd-server-tls
- operatorSecret: etcd-client-tls
- ## etcd cluster pod specific values
- ## Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-members-cluster-with-resource-requirement
- pod:
- ## Anti-affinity for etcd pod assignment
- ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
- antiAffinity: false
- resources:
- limits:
- cpu: 100m
- memory: 128Mi
- requests:
- cpu: 100m
- memory: 128Mi
- ## Node labels for etcd pod assignment
- ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
- nodeSelector: {}
- securityContext: {}
- tolerations: []
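One interaction in these defaults worth noting: setting etcdCluster.enableTLS to true makes the cluster templates above append a TLS block built verbatim from etcdCluster.tls. A sketch of the fragment that would be emitted (secret names are the defaults declared here; the Secrets themselves must already exist in the namespace):

    TLS:
      static:
        member:
          peerSecret: etcd-peer-tls
          serverSecret: etcd-server-tls
        operatorSecret: etcd-client-tls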
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/Chart.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/Chart.yaml
deleted file mode 100644
index ebdc0b40..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/charts/m3db-operator/Chart.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-apiVersion: v1
-name: m3db-operator
-version: 0.1.3
-# TODO(PS) - helm has issues with GKE's SemVer
-# Error: Chart requires kubernetesVersion: >=1.10.6 which is incompatible with Kubernetes v1.10.7-gke.2
-#
-#kubeVersion: ">=1.10.7"
-description: Kubernetes operator for M3DB timeseries database
-keywords:
- - operator
- - m3
-home: https://github.com/m3db/m3db-operator
-sources:
- - https://github.com/m3db/m3db-operator
-maintainers:
- - name: m3 Authors
- email: m3db@googlegroups.com
- url: https://operator.m3db.io/
-engine: gotpl
-icon: https://raw.githubusercontent.com/m3db/m3/master/docs/theme/assets/images/M3-logo.png
-appVersion: ">0.4.7"
-tillerVersion: ">=2.11.0"
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/requirements.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/requirements.yaml
deleted file mode 100644
index 8635dc4d..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/requirements.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-dependencies:
- - name: etcd-operator
- condition: etcd-operator.enabled
- # - name: visualization-operator
- # condition: visualization-operator.enabled
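The condition field here ties the subchart to a flag in the parent chart's values (the etcd-operator block at the end of this patch). A minimal sketch of the values fragment Helm evaluates for that condition:

    etcd-operator:
      enabled: true   # set to false to skip deploying the etcd-operator subchart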
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/resources/m3db.labels b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/resources/m3db.labels
deleted file mode 100644
index 4f1ddd53..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/resources/m3db.labels
+++ /dev/null
@@ -1,7 +0,0 @@
-kubectl label node/otconap6 failure-domain.beta.kubernetes.io/region=us-west1
-kubectl label node/otconap11 failure-domain.beta.kubernetes.io/region=us-west1
-kubectl label node/otccloud02 failure-domain.beta.kubernetes.io/region=us-west1
-
-kubectl label node/otconap6 failure-domain.beta.kubernetes.io/zone=us-west1-a --overwrite=true
-kubectl label node/otconap11 failure-domain.beta.kubernetes.io/zone=us-west1-b --overwrite=true
-kubectl label node/otccloud02 failure-domain.beta.kubernetes.io/zone=us-west1-c --overwrite=true
\ No newline at end of file
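These region/zone labels are what an M3DBCluster spec keys its isolation groups on. A hypothetical fragment using the zones labeled above (field names as documented for the m3db-operator; illustrative, not this repo's actual cluster spec):

    isolationGroups:
    - name: group1
      numInstances: 1
      nodeAffinityTerms:
      - key: failure-domain.beta.kubernetes.io/zone
        values:
        - us-west1-a
    - name: group2
      numInstances: 1
      nodeAffinityTerms:
      - key: failure-domain.beta.kubernetes.io/zone
        values:
        - us-west1-b
    - name: group3
      numInstances: 1
      nodeAffinityTerms:
      - key: failure-domain.beta.kubernetes.io/zone
        values:
        - us-west1-c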
diff --git a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/values.yaml b/kud/tests/vnfs/comp-app/collection/operators/helm/operator/values.yaml
deleted file mode 100644
index 071fa9a0..00000000
--- a/kud/tests/vnfs/comp-app/collection/operators/helm/operator/values.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright © 2019 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#################################################################
-# Global configuration defaults.
-#################################################################
-global:
- nodePortPrefix: 310
- repository: nexus3.onap.org:10001
- readinessRepository: oomk8s
- readinessImage: readiness-check:2.0.0
- loggingRepository: docker.elastic.co
- loggingImage: beats/filebeat:5.5.0
-
-#################################################################
-# k8s Operator Day-0 configuration defaults.
-#################################################################
-
-
-#################################################################
-# Enable or disable components
-#################################################################
-
-etcd-operator:
- enabled: true
- #visualization-operator:
- #enabled: true