Diffstat (limited to 'scripts/bootstrap.sh')
-rwxr-xr-x  scripts/bootstrap.sh | 66
1 file changed, 2 insertions(+), 64 deletions(-)
diff --git a/scripts/bootstrap.sh b/scripts/bootstrap.sh
index 7fa339a..ff953cf 100755
--- a/scripts/bootstrap.sh
+++ b/scripts/bootstrap.sh
@@ -22,7 +22,6 @@
# ONAP common Kubernetes namespace in ONAP_NAMESPACE environment variable
# If DCAE components are deployed in a separate Kubernetes namespace, that namespace in DCAE_NAMESPACE variable.
# Consul address with port in CONSUL variable
-# Plugin wagon files in /wagons
# Blueprints for components to be installed in /blueprints
# Input files for components to be installed in /inputs
# Configuration JSON files that need to be loaded into Consul in /dcae-configs
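# For reference, a hypothetical environment matching the prerequisites described in the
# header comments above; the values are illustrative and are not taken from this change:
export ONAP_NAMESPACE=onap              # common ONAP Kubernetes namespace
export DCAE_NAMESPACE=onap              # only if DCAE components use a separate namespace
export CONSUL=consul-server:8500        # Consul address with port
ls /blueprints /inputs /dcae-configs    # blueprints, component inputs, and Consul config JSON files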
@@ -119,18 +118,6 @@ function deploy {
fi
}
-# Install plugin if it's not already installed
-# $1 -- path to wagon file for plugin
-function install_plugin {
- ARCHIVE=$(basename $1)
- # See if it's already installed
- if cm_hasany "plugins?archive_name=$ARCHIVE"
- then
- echo plugin $1 already installed on ${CMADDR}
- else
- cfy plugin upload $1
- fi
-}
### END FUNCTION DEFINITIONS ###
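# The removed install_plugin function relies on a cm_hasany helper defined earlier in the
# script and not shown in this diff. A minimal sketch of such a check against the Cloudify
# Manager REST API could look like the following; the endpoint path, URL scheme, and JSON
# handling are assumptions for illustration, not the actual helper:
function cm_hasany_sketch {
  # $1 -- resource path plus query string, e.g. "plugins?archive_name=foo.wgn"
  local count
  count=$(curl -Ssk --user admin:"${CMPASS}" -H "Tenant: default_tenant" \
      "https://${CMADDR}/api/v3.1/$1" \
    | python -c 'import json,sys; print(len(json.load(sys.stdin).get("items", [])))')
  [ "${count:-0}" -gt 0 ]
}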
@@ -141,35 +128,14 @@ trap keep_running ERR
set -e
-# Consul service registration data
-CBS_REG='{"ID": "dcae-cbs0", "Name": "config_binding_service", "Address": "config-binding-service", "Port": 10000}'
-CBS_REG1='{"ID": "dcae-cbs1", "Name": "config-binding-service", "Address": "config-binding-service", "Port": 10000}'
-
# Set up profile to access Cloudify Manager
cfy profiles use -u admin -t default_tenant -p "${CMPASS}" ${CFYTLS} "${CMADDR}"
# Output status, for debugging purposes
cfy status
-# Check Consul readiness
-# The readiness container waits for a "consul-server" container to be ready,
-# but this isn't always enough. We need the Consul API to be up and for
-# the cluster to be formed, otherwise our Consul accesses might fail.
-# (Note in ONAP R2, we never saw a problem, but occasionally in R3 we
-# have seen Consul not be fully ready, so we add these checks, originally
-# used in the R1 HEAT-based deployment.)
-# Wait for Consul API to come up
-until curl http://${CONSUL}/v1/agent/services
-do
- echo Waiting for Consul API
- sleep 60
-done
-# Wait for a leader to be elected
-until [[ "$(curl -Ss http://{$CONSUL}/v1/status/leader)" != '""' ]]
-do
- echo Waiting for leader
- sleep 30
-done
+# Store the CM password into a Cloudify secret
+cfy secret create -s ${CMPASS} cmpass
# Load configurations into Consul KV store
for config in /dcae-configs/*.json
@@ -181,34 +147,6 @@ do
curl -v -X PUT -H "Content-Type: application/json" --data-binary @/tmp/dcae-upload ${CONSUL}/v1/kv/${key}
done
-# Put service registrations into the local Consul configuration directory
-for sr in CBS_REG CBS_REG1
-do
- echo '{"service" : ' ${!sr} ' }'> /opt/consul/config/${sr}.json
-done
-
-# Start the local consul agent instance
-/opt/consul/bin/consul agent --config-dir /opt/consul/config 2>&1 | tee /opt/consul/consul.log &
-
-# Store the CM password into a Cloudify secret
-cfy secret create -s ${CMPASS} cmpass
-
-# Load plugins onto CM
-for wagon in /wagons/*.wgn
-do
- install_plugin ${wagon}
-done
-
-# In some oversubscribed cloud environments, we have
-# observed that even though the plugin installations appear
-# to have completed, there are background installation tasks
-# that might still be running. So we check for running system workflows
-while cm_hasany "executions?is_system_workflow=true&status=pending&status=started&status=queued&status=scheduled"
-do
- echo "Waiting for running system workflows to complete"
- sleep 15
-done
-
# After this point, failures should not stop the script or block later commands
trap - ERR
set +e
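# A quick post-run check of the two steps this script still performs -- creating the Cloudify
# secret and loading the configuration JSON into Consul KV -- might look like this; deriving
# the KV key from the file name is an assumption, since that step is not shown in this diff:
cfy secrets list                          # the cmpass secret should appear in the list
for config in /dcae-configs/*.json
do
  key=$(basename "${config}" .json)       # assumed mapping from file name to Consul KV key
  curl -Ss "${CONSUL}/v1/kv/${key}?raw" | head -c 200; echo
done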