author     Rajamohan Raj <rajamohan.raj@intel.com>  2020-04-30 23:07:15 +0000
committer  Rajamohan Raj <rajamohan.raj@intel.com>  2020-05-12 19:47:41 +0000
commit     8fd7fd2ba9db1fb2dbe22c0cf89edb80454cff6d (patch)
tree       867d42e86073a5b3ca3917096e8f99f8f791d448 /kud
parent     8e0c00c4c59add2fa03a67081d74cd46934d034e (diff)
Create appContext and save to etcd
In this patch, the following tasks are accomplished:

1. Creation of appContext and storing the appContexts for each app in the
   compositeApp into etcd as part of the instantiation process.
2. Added a util method to extract parameters from k8s manifest files.
3. Added a new testing script to auto-create NCM artifacts through the NCM APIs.
4. Modified the existing plugin_collection_v2.sh to better test the orchestrator APIs.
5. Added logging to the appcontext lib.
6. Bug fix in the helm charts.

Issue-ID: MULTICLOUD-1064
Signed-off-by: Rajamohan Raj <rajamohan.raj@intel.com>
Change-Id: I1b0e4d1351ad3a083be529239748015ea5db2a41
Diffstat (limited to 'kud')
-rwxr-xr-x  kud/tests/plugin_collection_v2.sh                                                    67
-rwxr-xr-x  kud/tests/plugin_ncm_v2.sh                                                          196
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/role.yaml           2
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/rolebinding.yaml    2
4 files changed, 253 insertions, 14 deletions
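
Both touched test scripts are meant to be run directly from kud/tests against a local v2 deployment. A minimal usage sketch (assuming the NCM and orchestrator services are reachable on the ports hard-coded in the scripts, 9016 and 9015, and that a kubeconfig exists at $HOME/.kube/config):

    # Sketch only: assumes the v2 services are already running locally.
    cd kud/tests
    ./plugin_ncm_v2.sh           # registers cluster providers, clusters and labels via the NCM API (port 9016)
    ./plugin_collection_v2.sh    # exercises the orchestrator APIs (port 9015): projects, composite apps, profiles, intents

Both scripts first issue DELETE calls to clean up any documents left over from a previous run, then re-create them.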
diff --git a/kud/tests/plugin_collection_v2.sh b/kud/tests/plugin_collection_v2.sh
index 09dec5c4..5ebed6ad 100755
--- a/kud/tests/plugin_collection_v2.sh
+++ b/kud/tests/plugin_collection_v2.sh
@@ -33,6 +33,7 @@ if [ ${1:+1} ]; then
fi
base_url=${base_url:-"http://localhost:9015/v2"}
+
kubeconfig_path="$HOME/.kube/config"
csar_id=cb009bfe-bbee-11e8-9766-525400435678
@@ -69,12 +70,12 @@ appIntentNameForApp1="appIntentForApp1"
appIntentForApp1Desc="AppIntentForApp1Desc"
appIntentNameForApp2="appIntentForApp2"
appIntentForApp2Desc="AppIntentForApp2Desc"
-providerName1="aws"
-providerName2="azure"
-clusterName1="edge1"
-clusterName2="edge2"
-clusterLabelName1="east-us1"
-clusterLabelName2="east-us2"
+providerName1="cluster_provider1"
+providerName2="cluster_provider2"
+clusterName1="clusterName1"
+clusterName2="clusterName2"
+clusterLabelName1="clusterLabel1"
+clusterLabelName2="clusterLabel2"
deploymentIntentGroupName="test_deployment_intent_group"
deploymentIntentGroupNameDesc="test_deployment_intent_group_desc"
@@ -93,7 +94,53 @@ cloud_region_owner="localhost"
install_deps
populate_CSAR_composite_app_helm "$csar_id"
-# BEGIN: Register project API
+# BEGIN :: Delete statements are issued so that we clean up the 'orchestrator' collection
+# and freshly populate the documents; this also serves as a direct test
+# of all our DELETE APIs and an indirect test of all GET APIs
+
+
+print_msg "Deleting intentToBeAddedinDeploymentIntentGroup"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/deployment-intent-groups/${deploymentIntentGroupName}/intents/${intentToBeAddedinDeploymentIntentGroup}"
+
+print_msg "Deleting ${deploymentIntentGroupName}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/deployment-intent-groups/${deploymentIntentGroupName}"
+
+print_msg "Deleting ${appIntentNameForApp2}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/generic-placement-intents/${genericPlacementIntentName}/app-intents/${appIntentNameForApp2}"
+
+print_msg "Deleting ${appIntentNameForApp1}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/generic-placement-intents/${genericPlacementIntentName}/app-intents/${appIntentNameForApp1}"
+
+print_msg "Deleting ${genericPlacementIntentName}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/generic-placement-intents/${genericPlacementIntentName}"
+
+print_msg "Deleting ${sub_composite_profile_name2}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/composite-profiles/${main_composite_profile_name}/profiles/${sub_composite_profile_name2}"
+
+print_msg "Deleting ${sub_composite_profile_name1}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/composite-profiles/${main_composite_profile_name}/profiles/${sub_composite_profile_name1}"
+
+print_msg "Deleting ${main_composite_profile_name}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/composite-profiles/${main_composite_profile_name}"
+
+print_msg "Deleting ${app2_name}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/apps/${app2_name}"
+
+print_msg "Deleting ${app1_name}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/apps/${app1_name}"
+
+print_msg "Deleting ${composite_app_name}/${composite_app_version}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}"
+
+print_msg "Deleting ${project_name}"
+delete_resource "${base_url}/projects/${project_name}"
+
+# END :: Delete statements were issued so that we clean up the db
+# and freshly populate the documents; this also serves as a direct test
+# of all our DELETE APIs and an indirect test of all GET APIs
+
+
+# BEGIN: Register project
print_msg "Registering project"
payload="$(cat <<EOF
{
@@ -107,9 +154,9 @@ payload="$(cat <<EOF
EOF
)"
call_api -d "${payload}" "${base_url}/projects"
-# END: Register project API
+# END: Register project
-# BEGIN: Register composite-app API
+# BEGIN: Register composite-app
print_msg "Registering composite-app"
payload="$(cat <<EOF
{
@@ -126,7 +173,7 @@ payload="$(cat <<EOF
EOF
)"
call_api -d "${payload}" "${base_url}/projects/${project_name}/composite-apps"
-# END: Register composite-app API
+# END: Register composite-app
diff --git a/kud/tests/plugin_ncm_v2.sh b/kud/tests/plugin_ncm_v2.sh
new file mode 100755
index 00000000..b7d791f3
--- /dev/null
+++ b/kud/tests/plugin_ncm_v2.sh
@@ -0,0 +1,196 @@
+# /*
+# * Copyright 2020 Intel Corporation, Inc
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+set -o errexit
+set -o nounset
+set -o pipefail
+#set -o xtrace
+
+source _common_test.sh
+source _functions.sh
+source _common.sh
+
+base_url=${base_url:-"http://localhost:9016/v2"}
+
+kubeconfig_path="$HOME/.kube/config"
+
+cluster_provider_name1="cluster_provider1"
+cluster_provider_name2="cluster_provider2"
+cluster_provider_desc1="cluster_provider1_Desc"
+cluster_provider_desc2="cluster_provider2_Desc"
+userData1="user1"
+userData2="user2"
+
+clusterName1="clusterName1"
+cluster_desc1="cluster_desc1"
+clusterName2="clusterName2"
+cluster_desc2="cluster_desc2"
+# clusterName3 and clusterName4 will be registered with clusterLabel1 and clusterLabel2
+# under cluster_provider1 and cluster_provider2, respectively
+clusterName3="clusterName3"
+cluster_desc3="cluster_desc3"
+clusterName4="clusterName4"
+cluster_desc4="cluster_desc4"
+
+clusterLabel1="clusterLabel1"
+clusterLabel2="clusterLabel2"
+
+# BEGIN :: Delete statements are issued so that we clean up the 'cluster' collection
+# and freshly populate the documents; this also serves as a direct test
+# of all our DELETE APIs and an indirect test of all GET APIs
+print_msg "Deleting the clusterLabel1 and clusterLabel2, if they were existing"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName3}/labels/${clusterLabel1}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName4}/labels/${clusterLabel2}"
+# The statements above delete clusterLabel1 and clusterLabel2, which are linked to cluster3 and cluster4
+
+print_msg "Deleting the cluster1, cluster2, cluster3, cluster4 if they were existing"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName1}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName2}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName3}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName4}"
+
+print_msg "Deleting the cluster-providers, if they were existing"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}"
+
+# END :: Delete statements are issued so that we clean up the 'cluster' collection
+# and freshly populate the documents; this also serves as a direct test
+# of all our DELETE APIs and an indirect test of all GET APIs
+
+# BEGIN: Register cluster_provider_name1 and cluster_provider_name2
+print_msg "Deleting the cluster-providers, if they were existing"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}"
+
+print_msg "Registering cluster_provider_name1"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${cluster_provider_name1}",
+ "description": "${cluster_provider_desc1}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/cluster-providers"
+
+print_msg "Registering cluster_provider_name2"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${cluster_provider_name2}",
+ "description": "${cluster_provider_desc2}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/cluster-providers"
+# END: Register cluster_provider_name1 and cluster_provider_name2
+
+# BEGIN : Register cluster1, cluster2, cluster3 and cluster4
+print_msg "Registering cluster1"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${clusterName1}",
+ "description": "${cluster_desc1}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/cluster-providers/${cluster_provider_name1}/clusters" >/dev/null #massive output
+
+
+print_msg "Registering cluster2"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${clusterName2}",
+ "description": "${cluster_desc2}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/cluster-providers/${cluster_provider_name2}/clusters" >/dev/null #massive output
+
+
+print_msg "Registering cluster3"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${clusterName3}",
+ "description": "${cluster_desc3}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/cluster-providers/${cluster_provider_name1}/clusters" >/dev/null #massive output
+
+
+print_msg "Registering cluster4"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${clusterName4}",
+ "description": "${cluster_desc4}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/cluster-providers/${cluster_provider_name2}/clusters" >/dev/null #massive output
+
+# END : Register cluster1, cluster2, cluster3 and cluster4
+
+
+# BEGIN: adding labels to cluster3 and cluster4
+print_msg "Adding label to cluster3"
+payload="$(cat <<EOF
+{
+ "label-name" : "${clusterLabel1}"
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName3}/labels"
+
+print_msg "Adding label to cluster2"
+payload="$(cat <<EOF
+{
+ "label-name" : "${clusterLabel2}"
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName4}/labels"
+
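
The call_api helper used throughout comes from the sourced _functions.sh and is not part of this diff; assuming it is a thin wrapper around curl, a rough manual equivalent of the two request shapes above would be:

    # Hypothetical manual equivalents (call_api itself is defined in _functions.sh, not shown here).
    # JSON body, as used for cluster-provider and label registration:
    curl -X POST -H "Content-Type: application/json" \
         -d '{"metadata": {"name": "cluster_provider1", "description": "cluster_provider1_Desc", "userData1": "user1", "userData2": "user2"}}' \
         "http://localhost:9016/v2/cluster-providers"

    # Multipart form, as used for cluster registration (metadata plus the kubeconfig file):
    curl -X POST \
         -F 'metadata={"metadata": {"name": "clusterName1", "description": "cluster_desc1", "userData1": "user1", "userData2": "user2"}}' \
         -F "file=@$HOME/.kube/config" \
         "http://localhost:9016/v2/cluster-providers/cluster_provider1/clusters"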
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/role.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/role.yaml
index dfb932d8..a3c69c31 100644
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/role.yaml
+++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/role.yaml
@@ -1,7 +1,5 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
metadata:
name: {{ template "prometheus.fullname" . }}-prometheus
labels:
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/rolebinding.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/rolebinding.yaml
index 04932ee1..a721cd42 100644
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/rolebinding.yaml
+++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/rolebinding.yaml
@@ -1,7 +1,5 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
metadata:
name: {{ template "prometheus.fullname" . }}-prometheus
labels: