author    Todd Malsbary <todd.malsbary@intel.com>    2021-03-31 13:34:22 -0700
committer Todd Malsbary <todd.malsbary@intel.com>    2021-05-04 13:36:06 -0700
commit    1e343febbbd958143ad29e4feb4a31baca01e981 (patch)
tree      a65ceaf8c5528638bd5342d2cdf853efcad5a496
parent    e8b026c82e813dd275064b24b0af0ae5f2e89ffb (diff)
Replace emco with openness-21.03 release
This change also installs emcoctl in the artifacts directory, similar
to what is done for kubectl by kubespray.

Issue-ID: MULTICLOUD-1324
Signed-off-by: Todd Malsbary <todd.malsbary@intel.com>
Change-Id: I8447210487578ceeef61afc7c3e4d97905303c8a
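Once the playbooks below have run, the EMCO client tooling sits next to the other kubespray artifacts on the Ansible host. A typical session might look like this (paths illustrative; the artifacts directory is derived from the kube-master inventory location):

    # kubectl is already placed here by kubespray; emcoctl and its config now land beside it
    ls inventory/artifacts/
    # emcoctl.sh wraps emcoctl with the generated emcoconfig.yaml
    ./inventory/artifacts/emcoctl.sh get projects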
-rw-r--r--  kud/deployment_infra/playbooks/configure-emco-reset.yml          |  50
-rw-r--r--  kud/deployment_infra/playbooks/configure-emco.yml                | 161
-rw-r--r--  kud/deployment_infra/playbooks/emco-monitor-openness-21.03.patch |  13
-rw-r--r--  kud/deployment_infra/playbooks/emcoconfig.yaml.j2                |  21
-rw-r--r--  kud/deployment_infra/playbooks/emcoctl-openness-21.03.patch      |  13
-rw-r--r--  kud/deployment_infra/playbooks/kud-vars.yml                      |  31
-rwxr-xr-x  kud/hosting_providers/containerized/installer.sh                 |  11
-rwxr-xr-x  kud/hosting_providers/vagrant/installer.sh                       |   4
-rwxr-xr-x  kud/tests/emco.sh                                                | 122
9 files changed, 343 insertions, 83 deletions
diff --git a/kud/deployment_infra/playbooks/configure-emco-reset.yml b/kud/deployment_infra/playbooks/configure-emco-reset.yml
index 7cad36e4..d13bb9e7 100644
--- a/kud/deployment_infra/playbooks/configure-emco-reset.yml
+++ b/kud/deployment_infra/playbooks/configure-emco-reset.yml
@@ -8,40 +8,44 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-- hosts: kube-master
+- hosts: localhost
+  become: yes
   tasks:
     - name: Load kud variables
       include_vars:
         file: kud-vars.yml
 
-    - name: Change the emco directory and run helm delete
-      command: /usr/local/bin/helm uninstall --namespace emco emco
-      register: helm_delete
-      args:
-        chdir: /opt/multicloud/deployments/helm/v2/emco
+    - name: Check if emco is installed
+      command: /usr/local/bin/helm -n emco list
+      register: helm_list
 
-    - debug:
-        var: helm_delete.stdout_lines
+    - name: Set helm_installed fact
+      set_fact:
+        helm_installed: "{{ helm_list.stdout | regex_findall('^\\S+', multiline=True) }}"
 
-    - name: Change the emco directory and delete the emco namespace
-      command: /usr/local/bin/kubectl delete ns emco
-      register: delete_emco_ns
-      args:
-        chdir: /opt/multicloud/deployments/helm/v2/emco
+    - name: Uninstall monitor helm chart
+      command: /usr/local/bin/helm uninstall --namespace emco monitor
+      when: '"monitor" in helm_installed'
 
-    - debug:
-        var: delete_emco_ns.stdout_lines
+    - name: Uninstall emco helm charts
+      command: /usr/local/bin/helm uninstall --namespace emco emco
+      when: '"emco" in helm_installed'
 
-    - name: Change the emco directory and make clean
+    - name: Change to the emco directory and delete the emco namespace
+      command: /usr/local/bin/kubectl delete ns emco --ignore-not-found=true
+
+    - name: Check if emco directory exists
+      stat:
+        path: "{{ emco_dir }}"
+      register: emco_dir_stat
+
+    - name: Change to the emco directory and make clean
       command: /usr/bin/make clean
-      register: make_clean
       args:
-        chdir: /opt/multicloud/deployments/helm/v2/emco
-
-    - debug:
-        var: make_clean.stdout_lines
+        chdir: "{{ emco_dir }}/deployments/helm/emcoOpenNESS"
+      when: emco_dir_stat.stat.exists
 
-    - name: clean multicloud-k8s path
+    - name: Clean emco directory
       file:
         state: absent
-        path: /opt/multicloud
+        path: "{{ emco_dir }}"
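The helm_installed fact drives the conditional uninstalls above: regex_findall('^\S+', multiline=True) keeps the first whitespace-delimited token of every line of helm -n emco list, i.e. the release names plus the NAME header (which no when-clause matches, so it is harmless). A rough shell equivalent of what the filter extracts, as a sketch:

    /usr/local/bin/helm -n emco list | awk '{print $1}'
    # NAME
    # emco
    # monitor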
diff --git a/kud/deployment_infra/playbooks/configure-emco.yml b/kud/deployment_infra/playbooks/configure-emco.yml
index 96b4a23d..82ce61ad 100644
--- a/kud/deployment_infra/playbooks/configure-emco.yml
+++ b/kud/deployment_infra/playbooks/configure-emco.yml
@@ -8,7 +8,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- hosts: kube-master
+- hosts: localhost
+ become: yes
tasks:
- name: Load kud variables
include_vars:
@@ -16,43 +17,159 @@
- name: Getting emco code in /opt folder
git:
- repo: 'https://github.com/onap/multicloud-k8s.git'
- dest: /opt/multicloud
+ repo: "{{ emco_git_url }}"
+ version: "{{ emco_version }}"
+ dest: "{{ emco_dir }}"
+ depth: 1
+ force: yes
- - name: install make package for ubuntu systems
+ - name: Install make package for Ubuntu systems
apt: name=make state=present update_cache=yes
when: ansible_distribution == "Ubuntu"
- - name: install make package for centos systems
+ - name: Install make package for CentOS systems
yum: name=make state=present update_cache=yes
when: ansible_distribution == "CentOS"
- - name: Change the emco directory and run the command make all
- command: /usr/bin/make all
- register: make_all
- args:
- chdir: /opt/multicloud/deployments/helm/v2/emco
-
- - debug:
- var: make_all.stdout_lines
-
- name: Create emco namespace
shell: "/usr/local/bin/kubectl create namespace emco"
ignore_errors: True
- name: Create pod security policy role bindings
- shell: "/usr/local/bin/kubectl -n emco create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=emco:default --serviceaccount=emco:emco-fluentd"
+ shell: "/usr/local/bin/kubectl -n emco create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=emco:default --serviceaccount=emco:emco-fluentd --serviceaccount=emco:monitor"
ignore_errors: True
+ - name: Set artifacts_dir fact
+ set_fact:
+ artifacts_dir: "{{ hostvars[groups['kube-master'][0]]['inventory_dir'] }}/artifacts"
+
+ - name: Make emco helm charts
+ command: /usr/bin/make all
+ args:
+ chdir: "{{ emco_dir }}/deployments/helm/emcoOpenNESS"
+ when: "'emco' in emco_roles"
+
- name: Get cluster name
- shell: "kubectl -n kube-system get configmap/kubeadm-config -o yaml | grep clusterName: | awk '{print $2}'"
+ shell: "/usr/local/bin/kubectl -n kube-system get configmap/kubeadm-config -o yaml | grep clusterName: | awk '{print $2}'"
register: cluster_name
+ when: "'emco' in emco_roles"
+
+ - name: Create helm override values
+ copy:
+ dest: "{{ emco_dir }}/deployments/helm/emcoOpenNESS/helm_value_overrides.yaml"
+ content: |
+ {{ emco_values | to_nice_yaml(indent=2) }}
+ when: "'emco' in emco_roles"
+
+ - name: Install emco helm charts
+ command: /usr/local/bin/helm install --wait --namespace emco -f helm_value_overrides.yaml --set emco-db.etcd.clusterDomain={{ cluster_name.stdout }} --set emco-tools.fluentd.clusterDomain={{ cluster_name.stdout }} emco dist/packages/emco-0.1.0.tgz
+ args:
+ chdir: "{{ emco_dir }}/deployments/helm/emcoOpenNESS"
+ when: "'emco' in emco_roles"
+
+ - name: Apply patch to emcoctl
+ patch:
+ src: emcoctl-openness-21.03.patch
+ basedir: "{{ emco_dir }}"
+ strip: 1
+ when: emco_version == "openness-21.03"
+
+ - name: Make emcoctl
+ command: /usr/bin/make
+ args:
+ chdir: "{{ emco_dir }}/src/tools/emcoctl"
+ when: "'emco' in emco_roles"
- - name: Change the emco directory and run the command helm install
- command: /usr/local/bin/helm install --namespace emco --set emco-tools.fluentd.clusterDomain={{ cluster_name.stdout }} emco dist/packages/emco-0.1.0.tgz
- register: helm_install
+ - name: Get emco host address
+ shell: "/usr/local/bin/kubectl -n kube-system get configmap/kubeadm-config -o yaml | awk '/advertiseAddress:/ {print $2;exit}'"
+ register: emco_host
+ when: "'emco' in emco_roles"
+
+ - name: Write emcoctl config on ansible host
+ template:
+ src: emcoconfig.yaml.j2
+ dest: "{{ artifacts_dir }}/emcoconfig.yaml"
+ mode: 0640
+ become: no
+ run_once: yes
+ vars:
+ host: "{{ emco_host.stdout }}"
+ when:
+ - emcoconfig_localhost
+ - "'emco' in emco_roles"
+
+ - name: Copy emcoctl binary to ansible host
+ copy:
+ src: "{{ emco_dir }}/bin/emcoctl/emcoctl"
+ dest: "{{ artifacts_dir }}/emcoctl"
+ mode: 0755
+ become: no
+ run_once: yes
+ when:
+ - emcoctl_localhost
+ - "'emco' in emco_roles"
+
+ - name: Create helper script emcoctl.sh on ansible host
+ copy:
+ content: |
+ #!/bin/bash
+ ${BASH_SOURCE%/*}/emcoctl --config ${BASH_SOURCE%/*}/emcoconfig.yaml "$@"
+ dest: "{{ artifacts_dir }}/emcoctl.sh"
+ mode: 0755
+ become: no
+ run_once: yes
+ when:
+ - emcoctl_localhost
+ - emcoconfig_localhost
+ - "'emco' in emco_roles"
+
+ - name: Apply patch to monitor chart
+ patch:
+ src: emco-monitor-openness-21.03.patch
+ basedir: "{{ emco_dir }}"
+ strip: 1
+ when:
+ - emco_version == "openness-21.03"
+ - "'monitor' in emco_roles"
+
+ - name: Package monitor chart
+ command: /usr/local/bin/helm package monitor
+ args:
+ chdir: "{{ emco_dir }}/deployments/helm"
+ when: "'monitor' in emco_roles"
+
+ - name: Install monitor helm chart
+ command: /usr/local/bin/helm install --wait --namespace emco --set registryPrefix={{ emco_repository }} --set tag={{ emco_version }} monitor monitor-0.1.0.tgz
args:
- chdir: /opt/multicloud/deployments/helm/v2/emco
+ chdir: "{{ emco_dir }}/deployments/helm"
+ when: "'monitor' in emco_roles"
+
+- hosts: kube-master
+ become: yes
+ tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+
+ - name: Get emco host address
+ shell: "/usr/local/bin/kubectl -n kube-system get configmap/kubeadm-config -o yaml | grep advertiseAddress: | awk '{print $2}'"
+ register: emco_host
+ when: "'emco' in emco_roles"
+
+ - name: Install emcoctl config
+ template:
+ src: emcoconfig.yaml.j2
+ dest: "~/.emco.yaml"
+ mode: 0640
+ become: no
+ run_once: yes
+ vars:
+ host: "{{ emco_host.stdout }}"
+ when: "'emco' in emco_roles"
- - debug:
- var: helm_install.stdout_lines
+ - name: Install emcoctl
+ copy:
+ src: "{{ emco_dir }}/bin/emcoctl/emcoctl"
+ dest: "/usr/local/bin/emcoctl"
+ mode: 0755
+ when: "'emco' in emco_roles"
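After the second play, every kube-master node has emcoctl on its path and a ~/.emco.yaml pointing at the cluster's advertise address (the template is installed there on the assumption that emcoctl's default config lookup is ~/.emco.yaml), so the kud tests can talk to EMCO directly from a master node. For example, with prerequisites.yaml standing in for any emcoctl resource file:

    emcoctl get projects
    emcoctl apply -f prerequisites.yaml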
diff --git a/kud/deployment_infra/playbooks/emco-monitor-openness-21.03.patch b/kud/deployment_infra/playbooks/emco-monitor-openness-21.03.patch
new file mode 100644
index 00000000..44c72b6c
--- /dev/null
+++ b/kud/deployment_infra/playbooks/emco-monitor-openness-21.03.patch
@@ -0,0 +1,13 @@
+diff --git a/deployments/helm/monitor/templates/clusterrolebinding.yaml b/deployments/helm/monitor/templates/clusterrolebinding.yaml
+index 70305e50..6616787b 100644
+--- a/deployments/helm/monitor/templates/clusterrolebinding.yaml
++++ b/deployments/helm/monitor/templates/clusterrolebinding.yaml
+@@ -7,7 +7,7 @@ metadata:
+ subjects:
+ - kind: ServiceAccount
+   name: monitor
+-  namespace: default
++  namespace: {{ .Release.Namespace }}
+ roleRef:
+   kind: ClusterRole
+   name: monitor
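The monitor chart hardcodes the default namespace in its ClusterRoleBinding subject, which breaks when the chart is released into the emco namespace; the patch substitutes the release namespace instead. One way to sanity-check the patched template before installing, as a sketch assuming helm 3 on the path:

    /usr/local/bin/helm template --namespace emco monitor deployments/helm/monitor \
        --show-only templates/clusterrolebinding.yaml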
diff --git a/kud/deployment_infra/playbooks/emcoconfig.yaml.j2 b/kud/deployment_infra/playbooks/emcoconfig.yaml.j2
new file mode 100644
index 00000000..0131cd88
--- /dev/null
+++ b/kud/deployment_infra/playbooks/emcoconfig.yaml.j2
@@ -0,0 +1,21 @@
+orchestrator:
+  host: {{ host }}
+  port: 30415
+clm:
+  host: {{ host }}
+  port: 30461
+ncm:
+  host: {{ host }}
+  port: 30431
+ovnaction:
+  host: {{ host }}
+  port: 30471
+dcm:
+  host: {{ host }}
+  port: 30477
+gac:
+  host: {{ host }}
+  port: 30491
+dtc:
+  host: {{ host }}
+  port: 30481
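Each entry pairs the host passed in by the playbook (the kube-master advertise address) with the NodePort that the emco helm charts expose for that microservice. A rendered copy might begin like this (address illustrative):

    head -3 inventory/artifacts/emcoconfig.yaml
    # orchestrator:
    #   host: 192.168.121.10
    #   port: 30415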
diff --git a/kud/deployment_infra/playbooks/emcoctl-openness-21.03.patch b/kud/deployment_infra/playbooks/emcoctl-openness-21.03.patch
new file mode 100644
index 00000000..a0b308d3
--- /dev/null
+++ b/kud/deployment_infra/playbooks/emcoctl-openness-21.03.patch
@@ -0,0 +1,13 @@
+diff --git a/src/tools/emcoctl/cmd/utils.go b/src/tools/emcoctl/cmd/utils.go
+index 9f0821e..3d16b92 100644
+--- a/src/tools/emcoctl/cmd/utils.go
++++ b/src/tools/emcoctl/cmd/utils.go
+@@ -106,7 +106,7 @@ func readResources() []Resources {
+         return []Resources{}
+     }
+     valDec := yaml.NewDecoder(v)
+-    var mapDoc map[string]string
++    var mapDoc interface{}
+     if valDec.Decode(&mapDoc) != nil {
+         fmt.Println("Values file format incorrect:", "error", err, "filename", valuesFiles[0])
+         return []Resources{}
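The stock openness-21.03 emcoctl declares mapDoc as map[string]string, so a values file containing nesting or non-string scalars fails to decode; switching to interface{} lets the YAML decoder accept an arbitrary document. With the patched binary a values file like the following works (file contents hypothetical; -v names the values file used to expand templated resource files):

    cat > values.yaml << 'EOF'
    ProjectName: proj1
    Clusters:
      - Name: cluster1
    EOF
    emcoctl --config emcoconfig.yaml apply -f prerequisites.yaml -v values.yaml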
diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
index c430b46f..2735a055 100644
--- a/kud/deployment_infra/playbooks/kud-vars.yml
+++ b/kud/deployment_infra/playbooks/kud-vars.yml
@@ -84,3 +84,34 @@ cpu_manager:
   checkpoint_file: "/var/lib/kubelet/cpu_manager_state"
 topology_manager:
   policy: "best-effort" # Options: none (disabled), best-effort (default), restricted, single-numa-node
+
+emco_git_url: "https://github.com/open-ness/EMCO.git"
+emco_repository: "integratedcloudnative/"
+emco_version: "openness-21.03"
+emco_dir: "/opt/emco"
+emco_values:
+  global:
+    repository: "{{ emco_repository }}"
+    pullPolicy: IfNotPresent
+  emco-services:
+    orchestrator:
+      imageTag: "{{ emco_version }}"
+    ncm:
+      imageTag: "{{ emco_version }}"
+    rsync:
+      imageTag: "{{ emco_version }}"
+    clm:
+      imageTag: "{{ emco_version }}"
+    ovnaction:
+      imageTag: "{{ emco_version }}"
+    dcm:
+      imageTag: "{{ emco_version }}"
+    dtc:
+      imageTag: "{{ emco_version }}"
+    gac:
+      imageTag: "{{ emco_version }}"
+emcoconfig_localhost: true
+emcoctl_localhost: true
+emco_roles:
+- emco
+- monitor
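These are ordinary Ansible variables, so a deployment can override them at the command line rather than editing this file, e.g. to pin a tag or install only the monitor piece (invocation illustrative; inventory and playbook paths depend on the local checkout):

    ansible-playbook -i inventory/hosts.ini configure-emco.yml \
        -e emco_version=openness-21.03 \
        -e '{"emco_roles": ["monitor"]}'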
diff --git a/kud/hosting_providers/containerized/installer.sh b/kud/hosting_providers/containerized/installer.sh
index b2ec52af..eb30a23e 100755
--- a/kud/hosting_providers/containerized/installer.sh
+++ b/kud/hosting_providers/containerized/installer.sh
@@ -90,16 +90,13 @@ function install_k8s {
         tee $cluster_log/setup-kubernetes.log
 
     # Configure environment
+    # Requires kubeconfig_localhost and kubectl_localhost to be true
+    # in inventory/group_vars/k8s-cluster.yml
     mkdir -p $HOME/.kube
     cp $kud_inventory_folder/artifacts/admin.conf $HOME/.kube/config
-    # Copy Kubespray kubectl to be usable in host running Ansible.
-    # Requires kubectl_localhost: true in inventory/group_vars/k8s-cluster.yml
     if !(which kubectl); then
         cp $kud_inventory_folder/artifacts/kubectl /usr/local/bin/
     fi
-
-    cp -rf $kud_inventory_folder/artifacts \
-        /opt/kud/multi-cluster/$cluster_name/
 }
 
 # install_addons() - Install Kubenertes AddOns
@@ -219,6 +216,10 @@ function install_cluster {
     fi
     echo "installed the addons"
 
+    # Copy installation artifacts to be usable in host running Ansible
+    cp -rf $kud_inventory_folder/artifacts \
+        /opt/kud/multi-cluster/$cluster_name/
+
     _print_kubernetes_info
 }
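Moving the cp into install_cluster means the artifacts are captured after the addons (including EMCO) have populated them, so the per-cluster directory ends up with everything needed to reach the cluster afterwards (listing illustrative):

    ls /opt/kud/multi-cluster/${cluster_name}/artifacts
    # admin.conf  emcoconfig.yaml  emcoctl  emcoctl.sh  kubectl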
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
index 2a15de33..9be77a1f 100755
--- a/kud/hosting_providers/vagrant/installer.sh
+++ b/kud/hosting_providers/vagrant/installer.sh
@@ -36,6 +36,8 @@ function _install_go {
     export PATH=$PATH:/usr/local/go/bin
     sudo sed -i "s|^PATH=.*|PATH=\"$PATH\"|" /etc/environment
+    #allow golang to work with sudo
+    sudo sed -i 's|secure_path="\([^"]\+\)"|secure_path="\1:/usr/local/go/bin"|' /etc/sudoers
 }
 
 # _install_pip() - Install Python Package Manager
@@ -182,7 +184,6 @@ function install_addons {
 # install_plugin() - Install ONAP Multicloud Kubernetes plugin
 function install_plugin {
     echo "Installing multicloud/k8s plugin"
-    _install_go
     _install_docker
     sudo -E pip install --no-cache-dir docker-compose
 
@@ -260,6 +261,7 @@ echo "Removing ppa for jonathonf/python-3.6"
 sudo ls /etc/apt/sources.list.d/ || true
 sudo find /etc/apt/sources.list.d -maxdepth 1 -name '*jonathonf*' -delete || true
 sudo apt-get update
+_install_go
 install_k8s
 _set_environment_file
 install_addons
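Because _install_go now runs ahead of install_k8s and later steps invoke go under sudo, /usr/local/go/bin has to be appended to the sudoers secure_path; without it the root shell cannot resolve the toolchain. A quick check after the sed, as a sketch:

    sudo go version   # reports 'command not found' unless secure_path was extended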
diff --git a/kud/tests/emco.sh b/kud/tests/emco.sh
index 2b8eab1e..7cc3ca33 100755
--- a/kud/tests/emco.sh
+++ b/kud/tests/emco.sh
@@ -1,19 +1,7 @@
 #!/bin/bash
-# Copyright 2020 Intel Corporation, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
 
 set -o errexit
 set -o nounset
@@ -35,6 +23,7 @@ rsync_service_port=30441
 rsync_service_host="$master_ip"
 base_url_orchestrator=${base_url_orchestrator:-"http://$master_ip:30415/v2"}
 base_url_clm=${base_url_clm:-"http://$master_ip:30461/v2"}
+base_url_dcm=${base_url_dcm:-"http://$master_ip:30477/v2"}
 
 CSAR_DIR="/opt/csar"
 csar_id="cb009bfe-bbee-11e8-9766-525400435678"
@@ -94,6 +83,41 @@ labeldata="$(cat<<EOF
 EOF
 )"
 
+admin_logical_cloud_name="lcadmin"
+admin_logical_cloud_data="$(cat << EOF
+{
+  "metadata" : {
+    "name": "${admin_logical_cloud_name}",
+    "description": "logical cloud description",
+    "userData1":"<user data>",
+    "userData2":"<user data>"
+  },
+  "spec" : {
+    "level": "0"
+  }
+ }
+}
+EOF
+)"
+
+lc_cluster_1_name="lc1-c1"
+cluster_1_data="$(cat << EOF
+{
+  "metadata" : {
+    "name": "${lc_cluster_1_name}",
+    "description": "logical cloud cluster 1 description",
+    "userData1":"<user data>",
+    "userData2":"<user data>"
+  },
+
+  "spec" : {
+    "cluster-provider": "${clusterprovidername}",
+    "cluster-name": "${clustername}",
+    "loadbalancer-ip" : "0.0.0.0"
+  }
+}
+EOF
+)"
 
 # add the rsync controller entry
 rsynccontrollername="rsync"
@@ -316,7 +340,7 @@ deployment_intent_group_data="$(cat <<EOF
       "profile":"${collection_composite_profile_name}",
       "version":"${release}",
       "override-values":[],
-      "logical-cloud":"unused_logical_cloud"
+      "logical-cloud":"${admin_logical_cloud_name}"
    }
 }
 EOF
@@ -352,6 +376,8 @@ function createOrchestratorData {
     print_msg "creating project entry"
     call_api -d "${projectdata}" "${base_url_orchestrator}/projects"
 
+    createLogicalCloudData
+
     print_msg "creating collection composite app entry"
     call_api -d "${compositeapp_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps"
@@ -403,27 +429,30 @@ function deleteOrchestratorData {
     print_msg "Begin deleteOrchestratorData"
 
-    delete_resource "${base_url_orchestrator}/controllers/${rsynccontrollername}"
+    delete_resource_nox "${base_url_orchestrator}/controllers/${rsynccontrollername}"
 
-    delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
+    delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
 
-    delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${prometheus_placement_intent_name}"
-    delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${collectd_placement_intent_name}"
-    delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}"
+    delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${prometheus_placement_intent_name}"
+    delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${collectd_placement_intent_name}"
+    delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}"
 
-    delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${prometheus_profile_name}"
-    delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${collectd_profile_name}"
+    delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${prometheus_profile_name}"
+    delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${collectd_profile_name}"
 
     delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}"
-    delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}"
+    delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}"
 
-    delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${prometheus_app_name}"
+    delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${prometheus_app_name}"
-    delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${collectd_app_name}"
+    delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${collectd_app_name}"
-    delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}"
+    delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}"
+
+    deleteLogicalCloud
+
     delete_resource_nox "${base_url_orchestrator}/projects/${projectname}"
 
     print_msg "deleteOrchestratorData done"
@@ -443,12 +472,28 @@ function createClmData {
 function deleteClmData {
     print_msg "begin deleteClmData"
-    delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels/${labelname}"
+    delete_resource_nox "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels/${labelname}"
     delete_resource_nox "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}"
-    delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}"
+    delete_resource_nox "${base_url_clm}/cluster-providers/${clusterprovidername}"
     print_msg "deleteClmData done"
 }
 
+function createLogicalCloudData {
+    print_msg "creating logical cloud"
+    call_api -d "${admin_logical_cloud_data}" "${base_url_dcm}/projects/${projectname}/logical-clouds"
+    call_api -d "${cluster_1_data}" "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/cluster-references"
+}
+
+function getLogicalCloudData {
+    call_api_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}"
+    call_api_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/cluster-references/${lc_cluster_1_name}"
+}
+
+function deleteLogicalCloud {
+    delete_resource_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/cluster-references/${lc_cluster_1_name}"
+    delete_resource_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}"
+}
+
 function createData {
     createClmData
     createOrchestratorData
@@ -460,13 +505,25 @@ function deleteData {
 }
 
 function instantiate {
+    call_api -d "{ }" "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/instantiate"
     call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/approve"
-    call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"
+    # instantiate may fail due to the logical cloud not yet instantiated, so retry
+    try=0
+    until call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"; do
+        if [[ $try -lt 10 ]]; then
+            sleep 1s
+        else
+            return 1
+        fi
+        try=$((try + 1))
+    done
+    return 0
 }
-
 function terminateOrchData {
+    call_api -d "{ }" "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/terminate"
     call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/terminate"
+    call_api -d "{ }" "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/terminate"
 }
 
 function status {
@@ -479,13 +536,13 @@ function waitFor {
 
 # Setup
-function setup {
+function setupEmcoTest {
     install_deps
     populate_CSAR_composite_app_helm "$csar_id"
 }
 
 function start {
-    setup
+    setupEmcoTest
     deleteData
     print_msg "Before creating, deleting the data success"
     createData
@@ -516,6 +573,7 @@ function usage {
 
 if [[ "$#" -gt 0 ]] ; then
     case "$1" in
+        "setup" ) setupEmcoTest ;;
         "start" ) start ;;
        "stop" ) stop ;;
         "create" ) createData ;;