author    Jack Lucas <jflucas@research.att.com>    2017-10-14 00:24:19 +0000
committer Jack Lucas <jflucas@research.att.com>    2017-10-14 00:25:34 +0000
commit    a3ff376fe84dbd3581449f83a52fc4c9abec8e81
tree      746c6366d4343fb4f1c95ef00b7a2d451212065b
parent    f7e64768d235e079a93d431c1ffcfd38b3747ae1

Add install of platform components

Change-Id: I9600acb8c3eb29a9c994b7aa95e6f4fbeb06f9ba
Issue-Id: DCAEGEN2-159
Signed-off-by: Jack Lucas <jflucas@research.att.com>
-rw-r--r--  bootstrap/Dockerfile-template                 3
-rw-r--r--  bootstrap/README-docker.md                   85
-rwxr-xr-x  bootstrap/installer-docker-new.sh-template  464
-rwxr-xr-x  bootstrap/installer-docker.sh-template       83
4 files changed, 164 insertions, 471 deletions
diff --git a/bootstrap/Dockerfile-template b/bootstrap/Dockerfile-template
index 921f90c..935e77b 100644
--- a/bootstrap/Dockerfile-template
+++ b/bootstrap/Dockerfile-template
@@ -9,11 +9,12 @@ RUN apt-get update\
&& mkdir -p ${INSROOT}/${APPUSER}/blueprints\
&& useradd -d ${INSROOT}/${APPUSER} ${APPUSER}
COPY installer-docker.sh ${INSROOT}/${APPUSER}/installer
+COPY teardown.sh ${INSROOT}/${APPUSER}/teardown
# COPY *.yaml ${INSROOT}/${APPUSER}/blueprints/
RUN wget -P ${INSROOT}/${APPUSER}/blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/centos_vm.yaml
RUN wget -P ${INSROOT}/${APPUSER}/blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/consul_cluster.yaml
WORKDIR ${INSROOT}/${APPUSER}
-RUN chown -R ${APPUSER}:${APPUSER} ${INSROOT}/${APPUSER} && chmod +x ${INSROOT}/${APPUSER}/installer
+RUN chown -R ${APPUSER}:${APPUSER} ${INSROOT}/${APPUSER} && chmod +x ${INSROOT}/${APPUSER}/installer && chmod +x ${INSROOT}/${APPUSER}/teardown
USER ${APPUSER}
ENTRYPOINT exec "${INSROOT}/${APPUSER}/installer"
diff --git a/bootstrap/README-docker.md b/bootstrap/README-docker.md
index dbc7750..d4295ef 100644
--- a/bootstrap/README-docker.md
+++ b/bootstrap/README-docker.md
@@ -6,13 +6,17 @@
a private network interconnecting the VMs; and an external network that provides "floating" IP addresses for the VMs. A router connects the two networks. Each VM is assigned two IP addresses, one allocated from the private network when the VM is launched.
Then a floating IP is assigned to the VM from the external network. The UUIDs of the private and external networks are needed for preparing the inputs.yaml file used for running the bootstrap container.
- b) Add a public key to openStack, note its name (we will use KEYNAME as example for below). Save the private key (we will use KAYPATH as its path example), make sure it's permission is globally readable.
+ b) Add a public key to OpenStack and note its name (we will use KEYNAME as an example below). Save the private key (we will use KEYPATH as its example path) and make sure its permissions are globally readable.
 c) Load the following base VM images into OpenStack: a CentOS 7 base image and an Ubuntu 16.04 base image.
 d) Obtain from OpenStack the IDs/UUIDs of the resources needed by the inputs.yaml file, as explained below.
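+For example, with a configured `openstack` CLI client, these IDs can typically be listed as follows (a sketch; resource names and output vary by environment):
+
+```
+# UUIDs of the private and external networks
+openstack network list
+# UUIDs of the CentOS 7 and Ubuntu 16.04 base images
+openstack image list
+# key pair and flavor identifiers, if needed
+openstack keypair list
+openstack flavor list
+```
+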
-2. On dev machine, edit an inputs.yaml file at INPUTSYAMLPATH
+2. On the dev machine, set up a directory to hold environment-specific configuration files. Call its path CONFIGDIR.
+
+3. Put the private key mentioned above into CONFIGDIR as a file named `key`, and make it globally readable.
+4. Create a file named `inputs.yaml` in CONFIGDIR:
+
```
1 centos7image_id: '7c8d7524-de1f-490b-8418-db294bfa2d65'
2 ubuntu1604image_id: '4b09c18b-d69e-4ba8-a1bd-562cab91ff20'
@@ -55,9 +59,80 @@ Here is a line-by-line explanation of the parameters
18. Path to the boot scripts within the raw artifact repo, for example: 'org.onap.dcaegen2.deployments/releases/scripts'
-3. Pull and run the docker container
+5. Create a file in CONFIGDIR called `invinputs.yaml`. This contains environment-specific information for the inventory service. (TODO: the values below are examples only, not the correct values for the ONAP integration environment.)
+
+```
+1 docker_host_override: "platform_dockerhost"
+2 asdc_address: "sdc.onap.org:8443"
+3 asdc_uri: "https://sdc.onap.org:8443"
+4 asdc_user: "ci"
+5 asdc_password: !!str 123456
+6 asdc_environment_name: "ONAP-AMDOCS"
+7 postgres_user_inventory: "postgres"
+8 postgres_password_inventory: "onap123"
+9 service_change_handler_image: "nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.servicechange-handler:latest"
+10 inventory_image: "nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.inventory-api:latest"
+```
+Here is a line-by-line description of the parameters:
+ 1. The service name for the platform docker host (should be the same in all environments)
+ 2. The hostname and port of the SDC service
+ 3. The URI of the SDC service
+ 4. The SDC username
+ 5. The SDC password
+ 6. The SDC environment name
 7. The Postgres user name
 8. The Postgres password
+ 9. The Docker image to be used for the service change handler (should be the same in all environments)
+ 10. The Docker image to be used for the inventory service (should be the same in all environments)
+
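+For reference, the installer consumes this file when it deploys the inventory blueprint (this `cfy` command is taken from installer-docker.sh-template):
+
+```
+cfy install -p ./blueprints/inv/${INVBP} -b PlatformServicesInventory -d PlatformServicesInventory -i "location_id=${LOCATION}" -i ../config/invinputs.yaml
+```
+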
+6. Create a file in CONFIGDIR called `phinputs.yaml`. This contains environment-specific information for the policy handler.
+
+```
+application_config:
+ policy_handler :
+ # parallelize the getConfig queries to policy-engine on each policy-update notification
+ thread_pool_size : 4
+
+ # parallelize requests to policy-engine and keep them alive
+ pool_connections : 20
+
+ # list of policyName prefixes (filters) that DCAE-Controller handles (=ignores any other policyName values)
+ scope_prefixes : ["DCAE.Config_"]
+
+ # retry to getConfig from policy-engine on policy-update notification
+ policy_retry_count : 5
+ policy_retry_sleep : 5
+
+ # policy-engine config
+ # These are the url of and the auth for the external system, namely the policy-engine (PDP).
+ # We obtain that info manually from PDP folks at the moment.
+ # In long run we should figure out a way of bringing that info into consul record
+ # related to policy-engine itself.
+ policy_engine :
+ url : "https://peawiv9nspd01.pedc.sbc.com:8081"
+ path_pdp : "/pdp/"
+ path_api : "/pdp/api/"
+ headers :
+ Accept : "application/json"
+ "Content-Type" : "application/json"
+ ClientAuth : "Basic bTAzOTQ5OnBvbGljeVIwY2sk"
+ Authorization : "Basic dGVzdHBkcDphbHBoYTEyMw=="
+ Environment : "TEST"
+ target_entity : "policy_engine"
+ # name of deployment-handler service in consul for policy-handler to direct the policy-updates to
+ deploy_handler : "deployment_handler"
+```
+TODO: provide explanations
+
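+The `ClientAuth` and `Authorization` headers above carry standard HTTP Basic credentials, i.e. base64-encoded `user:password` strings. For example, the illustrative `Authorization` value can be reproduced with:
+
+```
+echo -n 'testpdp:alpha123' | base64
+# dGVzdHBkcDphbHBoYTEyMw==
+```
+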
+7. Pull and run the Docker container:
```
docker pull nexus3.onap.org:10003/onap/org.onap.dcaegen2.deployments.bootstrap:1.0
-docker run -v KEYPATH:/opt/app/installer/config/key -v INPUTSYAMLPATH:/opt/app/installer/config/inputs.yaml -e "LOCATION=dg2" nexus3.onap.org:10003/onap/org.onap.dcaegen2.deployments.bootstrap:1.0
+docker run -d --name boot -v CONFIGDIR:/opt/app/installer/config -e "LOCATION=dg2" nexus3.onap.org:10003/onap/org.onap.dcaegen2.deployments.bootstrap:1.0
+```
+The container stays up even after the installation is complete. Use the `docker exec` command to get inside the container, then run `cfy` commands to interact with the Cloudify Manager.
+
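+For example, a typical session might look like this (assuming the container was started with `--name boot` as above):
+
+```
+docker exec -it boot /bin/bash
+# then, inside the container:
+cfy status
+cfy deployments list
+```
+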
+8. To tear down the entire DCAE installation:
+
+```
+docker exec -it boot ./teardown
```
-The container stays up even after the installation is complete. Using the docker exec command to get inside of the container, then run cfy commands to interact with the Cloudify Manager. \ No newline at end of file
diff --git a/bootstrap/installer-docker-new.sh-template b/bootstrap/installer-docker-new.sh-template
deleted file mode 100755
index c8cefcb..0000000
--- a/bootstrap/installer-docker-new.sh-template
+++ /dev/null
@@ -1,464 +0,0 @@
-#!/bin/bash
-#
-# ============LICENSE_START==========================================
-# ===================================================================
-# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
-# ===================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END============================================
-#
-# ECOMP and OpenECOMP are trademarks
-# and service marks of AT&T Intellectual Property.
-#
-
-# URLs for artifacts needed for installation
-DESIGTYPES={{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/type_files/dnsdesig/dns_types.yaml
-DESIGPLUG={{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/plugins/dnsdesig-1.0.0-py27-none-any.wgn
-SSHKEYTYPES={{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/type_files/sshkeyshare/sshkey_types.yaml
-SSHKEYPLUG={{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/plugins/sshkeyshare-1.0.0-py27-none-any.wgn
-OSPLUGINZIP=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/archive/1.4.zip
-OSPLUGINWGN=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/releases/download/2.2.0/cloudify_openstack_plugin-2.2.0-py27-none-linux_x86_64-centos-Core.wgn
-
-PLATBPSRC={{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}
-DOCKERBP=DockerBP.yaml
-CBSBP=config_binding_service.yaml
-CDAPBP=cdapbp7.yaml
-CDAPBROKERBP=cdap_broker.yaml
-INVBP=inventory.yaml
-DHBP=DeploymentHandler.yaml
-PHBP=policy_handler.yaml
-
-DOCKERBPURL="${PLATBPSRC}/${DOCKERBP}"
-CBSBPURL="${PLATBPSRC}/${CBSBP}"
-CDAPBPURL="${PLATBPSRC}/${CDAPBP}"
-CDAPBROKERBPURL="${PLATBPSRC}/${CDAPBROKERBP}"
-INVBPURL="${PLATBPSRC}/${INVBP}"
-DHBPURL="${PLATBPSRC}/${DHBP}"
-PHBPURL="${PLATBPSRC}/${PHBP}"
-
-LOCATIONID=$(printenv LOCATION)
-
-
-# Make sure ssh doesn't prompt for new host or choke on a new host with an IP it's seen before
-SSHOPTS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-STARTDIR=$(pwd)
-
-SSHUSER=centos
-PVTKEY=./config/key
-INPUTS=./config/inputs.yaml
-
-if [ "$LOCATION" = "" ]
-then
- echo 'Environment variable LOCATION not set. Should be set to location ID for this installation.'
- exit 1
-fi
-
-set -e
-set -x
-
-# Docker workaround for SSH key
-# In order for the container to be able to access the key when it's mounted from the Docker host,
-# the key file has to be world-readable. But ssh itself will not work with a private key that's world readable.
-# So we make a copy and change permissions on the copy.
-# NB -- the key on the Docker host has to be world-readable, which means that, from the host machine, you
-# can't use it with ssh. It needs to be a world-readable COPY.
-PVTKEY=./key600
-cp ./config/key ${PVTKEY}
-chmod 600 ${PVTKEY}
-
-# Create a virtual environment
-virtualenv dcaeinstall
-source dcaeinstall/bin/activate
-
-# Install Cloudify
-pip install cloudify==3.4.0
-
-# Install the Cloudify OpenStack plugin
-wget -qO- ${OSPLUGINZIP} > openstack.zip
-pip install openstack.zip
-
-# Spin up a VM
-
-# Get the Designate and SSH key type files and plugins
-mkdir types
-wget -qO- ${DESIGTYPES} > types/dns_types.yaml
-wget -qO- ${SSHKEYTYPES} > types/sshkey_types.yaml
-
-wget -O dnsdesig.wgn ${DESIGPLUG}
-wget -O sshkeyshare.wgn ${SSHKEYPLUG}
-
-wagon install -s dnsdesig.wgn
-wagon install -s sshkeyshare.wgn
-
-## Fix up the inputs file to get the private key locally
-sed -e "s#key_filename:.*#key_filename: $PVTKEY#" < ${INPUTS} > /tmp/local_inputs
-
-# Now install the VM
-# Don't exit on error after this point--keep container running so we can do uninstalls after a failure
-set +e
-if wget -P ./blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/centos_vm.yaml; then
- echo "Succeeded in getting the newest centos_vm.yaml"
-else
- echo "Failed to update centos_vm.yaml, using default version"
-fi
-set -e
-cfy local init --install-plugins -p ./blueprints/centos_vm.yaml -i /tmp/local_inputs -i "datacenter=$LOCATION"
-cfy local execute -w install --task-retries=10
-PUBIP=$(cfy local outputs | grep -Po '"public_ip": "\K.*?(?=")')
-
-
-## It's probably not completely ready when the installation finish, so wait
-sleep 180
-
-echo "Installing Cloudify Manager on ${PUBIP}."
-
-PVTIP=$(ssh $SSHOPTS -i "$PVTKEY" "$SSHUSER"@"$PUBIP" 'echo PVTIP=`curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4`' | grep PVTIP | sed 's/PVTIP=//')
-if [ "$PVTIP" = "" ]
-then
- echo Cannot access specified machine at $PUBIP using supplied credentials
- # Don't exit--keep the container up so we can uninstall the VM and supporting entities
- while true
- do
- sleep 300
- done
-fi
-
-
-# Copy private key onto Cloudify Manager VM
-PVTKEYPATH=$(cat ${INPUTS} | grep "key_filename" | cut -d "'" -f2)
-PVTKEYNAME=$(basename $PVTKEYPATH)
-PVTKEYDIR=$(dirname $PVTKEYPATH)
-scp $SSHOPTS -i $PVTKEY $PVTKEY $SSHUSER@$PUBIP:/tmp/$PVTKEYNAME
-ssh -t $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mkdir -p $PVTKEYDIR
-ssh -t $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/$PVTKEYNAME $PVTKEYPATH
-
-ESMAGIC=$(uuidgen -r)
-WORKDIR=$HOME/cmtmp
-BSDIR=$WORKDIR/cmbootstrap
-PVTKEY2=$BSDIR/id_rsa.cfybootstrap
-TMPBASE=$WORKDIR/tmp
-TMPDIR=$TMPBASE/lib
-SRCS=$WORKDIR/srcs.tar
-TOOL=$WORKDIR/tool.py
-rm -rf $WORKDIR
-mkdir -p $BSDIR $TMPDIR/cloudify/wheels $TMPDIR/cloudify/sources $TMPDIR/manager
-chmod 700 $WORKDIR
-cp "$PVTKEY" $PVTKEY2
-cat >$TOOL <<!EOF
-#!/usr/local/bin/python
-#
-import yaml
-import sys
-bsdir = sys.argv[1]
-with open(bsdir + '/simple-manager-blueprint-inputs.yaml', 'r') as f:
- inpyaml = yaml.load(f)
-with open(bsdir + '/simple-manager-blueprint.yaml', 'r') as f:
- bpyaml = yaml.load(f)
-for param, value in bpyaml['inputs'].items():
- if value.has_key('default') and not inpyaml.has_key(param):
- inpyaml[param] = value['default']
-print inpyaml['manager_resources_package']
-!EOF
-
-#
-# Try to disable attempt to download virtualenv when not needed
-#
-ssh $SSHOPTS -t -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "echo y; mkdir -p /root/.virtualenv; echo '"'"'[virtualenv]'"'"' >/root/.virtualenv/virtualenv.ini; echo no-download=true >>/root/.virtualenv/virtualenv.ini"'
-
-# Gather installation artifacts
-# from documentation, URL for manager blueprints archive
-BSURL=https://github.com/cloudify-cosmo/cloudify-manager-blueprints/archive/3.4.tar.gz
-BSFILE=$(basename $BSURL)
-
-umask 022
-wget -qO- $BSURL >$BSDIR/$BSFILE
-cd $BSDIR
-tar xzvf $BSFILE
-MRPURL=$(python $TOOL $BSDIR/cloudify-manager-blueprints-3.4)
-MRPFILE=$(basename $MRPURL)
-wget -qO- $MRPURL >$TMPDIR/cloudify/sources/$MRPFILE
-
-tar cf $SRCS -C $TMPDIR cloudify
-rm -rf $TMPBASE
-#
-# Load required package files onto VM
-#
-scp $SSHOPTS -i $PVTKEY2 $SRCS $SSHUSER@$PUBIP:/tmp/.
-ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "cd /opt; tar xf /tmp/srcs.tar; chown -R root:root /opt/cloudify /opt/manager; rm -rf /tmp/srcs.tar"'
-#
-# Install config file -- was done by DCAE controller. What now?
-#
-ssh $SSHOPTS -t -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc '"'"'mkdir -p /opt/dcae; if [ -f /tmp/cfy-config.txt ]; then cp /tmp/cfy-config.txt /opt/dcae/config.txt && chmod 644 /opt/dcae/config.txt; fi'"'"
-cd $WORKDIR
-
-#
-# Check for and set up https certificate information
-#
-rm -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt
-ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "openssl pkcs12 -in /opt/app/dcae-certificate/certificate.pkcs12 -passin file:/opt/app/dcae-certificate/.password -nodes -chain"' | awk 'BEGIN{x="/dev/null";}/-----BEGIN CERTIFICATE-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.crt";}/-----BEGIN PRIVATE KEY-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.key";}{print >x;}/-----END /{x="/dev/null";}'
-USESSL=false
-if [ -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key -a -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt ]
-then
- USESSL=true
-fi
-#
-# Set up configuration for the bootstrap
-#
-export CLOUDIFY_USERNAME=admin CLOUDIFY_PASSWORD=encc0fba9f6d618a1a51935b42342b17658
-cd $BSDIR/cloudify-manager-blueprints-3.4
-cp simple-manager-blueprint.yaml bootstrap-blueprint.yaml
-ed bootstrap-blueprint.yaml <<'!EOF'
-/^node_types:/-1a
- plugin_resources:
- description: >
- Holds any archives that should be uploaded to the manager.
- default: []
- dsl_resources:
- description: >
- Holds a set of dsl required resources
- default: []
-.
-/^ upload_resources:/a
- plugin_resources: { get_input: plugin_resources }
-.
-w
-q
-!EOF
-
-sed <simple-manager-blueprint-inputs.yaml >bootstrap-inputs.yaml \
- -e "s;.*public_ip: .*;public_ip: '$PUBIP';" \
- -e "s;.*private_ip: .*;private_ip: '$PVTIP';" \
- -e "s;.*ssh_user: .*;ssh_user: '$SSHUSER';" \
- -e "s;.*ssh_key_filename: .*;ssh_key_filename: '$PVTKEY2';" \
- -e "s;.*elasticsearch_java_opts: .*;elasticsearch_java_opts: '-Des.cluster.name=$ESMAGIC';" \
- -e "/ssl_enabled: /s/.*/ssl_enabled: $USESSL/" \
- -e "/security_enabled: /s/.*/security_enabled: $USESSL/" \
- -e "/admin_password: /s/.*/admin_password: '$CLOUDIFY_PASSWORD'/" \
- -e "/admin_username: /s/.*/admin_username: '$CLOUDIFY_USERNAME'/" \
- -e "s;.*manager_resources_package: .*;manager_resources_package: 'http://169.254.169.254/nosuchthing/$MRPFILE';" \
- -e "s;.*ignore_bootstrap_validations: .*;ignore_bootstrap_validations: true;" \
-
-# Add plugin resources
-# TODO Maintain plugin list as updates/additions occur
-cat >>bootstrap-inputs.yaml <<'!EOF'
-plugin_resources:
- - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-openstack-plugin/1.4/cloudify_openstack_plugin-1.4-py27-none-linux_x86_64-centos-Core.wgn'
- - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-fabric-plugin/1.4.1/cloudify_fabric_plugin-1.4.1-py27-none-linux_x86_64-centos-Core.wgn'
- - '{{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/plugins/dnsdesig-1.0.0-py27-none-any.wgn'
- - '{{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/plugins/sshkeyshare-1.0.0-py27-none-any.wgn'
- - '{{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_plugins_releases }}/plugins/cdapcloudify/cdapcloudify-14.2.5-py27-none-any.wgn'
- - '{{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_plugins_releases }}/plugins/dcaepolicyplugin/dcaepolicyplugin-1.0.0-py27-none-any.wgn'
- - '{{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_plugins_releases }}/plugins/dockerplugin/dockerplugin-2.4.0-py27-none-any.wgn'
- - '{{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_plugins_releases }}/plugins/relationshipplugin/relationshipplugin-1.0.0-py27-none-any.wgn'
-!EOF
-#
-# And away we go
-#
-cfy init -r
-cfy bootstrap --install-plugins -p bootstrap-blueprint.yaml -i bootstrap-inputs.yaml
-rm -f resources/ssl/server.key
-
-# Install Consul VM via a blueprint
-cd $STARTDIR
-mkdir consul
-cd consul
-cfy init -r
-cfy use -t ${PUBIP}
-echo "Deploying Consul VM"
-
-set +e
-if wget -P ../blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/consul_cluster.yaml; then
- echo "Succeeded in getting the newest consul_cluster.yaml"
-else
- echo "Failed to update consul_cluster.yaml, using default version"
-fi
-set -e
-cfy install -p ../blueprints/consul_cluster.yaml -d consul -i ../${INPUTS} -i "datacenter=$LOCATION"
-
-# Get the floating IP for one member of the cluster
-# Needed for instructing the Consul agent on CM host to join the cluster
-CONSULIP=$(cfy deployments outputs -d consul | grep -Po 'Value: \K.*')
-echo Consul deployed at $CONSULIP
-
-# Wait for Consul API to come up
-until curl http://$CONSULIP:8500/v1/agent/services
-do
- echo Waiting for Consul API
- sleep 60
-done
-
-# Wait for a leader to be elected
-until [[ "$(curl -Ss http://$CONSULIP:8500/v1/status/leader)" != '""' ]]
-do
- echo Waiting for leader
- sleep 30
-done
-
-# Instruct the client-mode Consul agent running on the CM to join the cluster
-curl http://$PUBIP:8500/v1/agent/join/$CONSULIP
-
-# Register Cloudify Manager in Consul via the local agent on CM host
-
-REGREQ="
-{
- \"Name\" : \"cloudify_manager\",
- \"ID\" : \"cloudify_manager\",
- \"Tags\" : [\"http://${PUBIP}/api/v2.1\"],
- \"Address\": \"${PUBIP}\",
- \"Port\": 80,
- \"Check\" : {
- \"Name\" : \"cloudify_manager_health\",
- \"Interval\" : \"300s\",
- \"HTTP\" : \"http://${PUBIP}/api/v2.1/status\",
- \"Status\" : \"passing\",
- \"DeregisterCriticalServiceAfter\" : \"30m\"
- }
-}
-"
-
-curl -X PUT -H 'Content-Type: application/json' --data-binary "$REGREQ" http://$PUBIP:8500/v1/agent/service/register
-# Make Consul address available to plugins on Cloudify Manager
-# TODO probably not necessary anymore
-ENVINI=$(mktemp)
-cat <<!EOF > $ENVINI
-[$LOCATION]
-CONSUL_HOST=$CONSULIP
-CONFIG_BINDING_SERVICE=config_binding_service
-!EOF
-scp $SSHOPTS -i ../$PVTKEY $ENVINI $SSHUSER@$PUBIP:/tmp/env.ini
-ssh -t $SSHOPTS -i ../$PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/env.ini /opt/env.ini
-rm $ENVINI
-
-
-
-##### INSTALLATION OF PLATFORM COMPONENTS
-
-# Get component blueprints
-wget -P ./blueprints/docker/ ${DOCKERBPURL}
-wget -P ./blueprints/cbs/ ${CBSBPURL}
-wget -P ./blueprints/cdap/ ${CDAPBPURL}
-wget -P ./blueprints/cdapbroker/ ${CDAPBROKERBPURL}
-wget -P ./blueprints/inv/ ${INVBPURL}
-wget -P ./blueprints/dh/ ${DHBPURL}
-wget -P ./blueprints/ph/ ${PHBPURL}
-
-# Set up the credentials for access to the Docker registry
-curl -X PUT -H "Content-Type: application/json" --data-binary '[{"username":"docker", "password":"docker", "registry": "nexus3.onap.org:10001"}]' http://${CONSULIP}:8500/v1/kv/docker_plugin/docker_logins
-
-# Install platform Docker host
-# Note we're still in the "consul" directory, which is init'ed for talking to CM
-
-
-# CDAP cluster
-cfy install -p ../blueprints/cdap/${CDAPBP} -b cdapbp7 -d cdap7 -i ../${INPUTS} -i "location_id=$LOCATION"
-
-
-# Docker host for platform containers
-cfy install -v -p ../blueprints/docker/${DOCKERBP} -b DockerBP -d DockerPlatform -i ../${INPUTS} -i "registered_dockerhost_name=platform_dockerhost" -i "registrator_image=onapdcae/registrator:v7" -i "location_id=${LOCATION}" -i "node_name=dokp00"
-
-
-# Docker host for service containers
-cfy deployments create -b DockerBP -d DockerComponent -i ../${INPUTS} -i "registered_dockerhost_name=component_dockerhost" -i "location_id=${LOCATION}" -i "registrator_image=onapdcae/registrator:v7" -i "node_name=doks00"
-cfy executions start -d DockerComponent -w install
-
-# wait for the extended platform VMs settle
-sleep 180
-
-
-# CDAP cluster
-cfy install -p ../blueprints/cdap/${CDAPBP} -b cdapbp7 -d cdap7 -i ../${INPUTS} -i "location_id=${LOCATION}"
-
-
-# config binding service
-cfy install -p ../blueprints/cbs/${CBSBP} -b config_binding_service -d config_binding_service -i "location_id=${LOCATION}"
-
-
-# Inventory
-cat >../invinputs <<EOL
-location_id: "${LOCATION}"
-docker_host_override: "platform_dockerhost"
-asdc_address: ""
-asdc_uri: ""
-asdc_user: ""
-asdc_password: ""
-asdc_environment_name: ""
-postgres_user_inventory: "postgres"
-postgres_password_inventory: "onap123"
-service_change_handler_image: "nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.servicechange-handler:latest"
-inventory_image: "nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.inventory-api:latest"
-EOL
-cfy install -p ../blueprints/inv/${INVBP} -b PlatformServicesInventory -d PlatformServicesInventory -i ../invinputs
-
-
-# Deployment Handler DH
-cat >../dhinputs <<EOL
-application_config:
- cloudify:
- protocol: "http"
- inventory:
- protocol: "http"
-EOL
-cfy install -p ../blueprints/dh/${DHBP} -b DeploymentHandlerBP -d DeploymentHandler -i "location_id=${LOCATION}" -i ../dhinputs
-
-
-# Policy Handler PH
-cat >../phinputs <<EOL
-application_config:
- policy_handler :
- # parallelize the getConfig queries to policy-engine on each policy-update notification
- thread_pool_size : 4
-
- # parallelize requests to policy-engine and keep them alive
- pool_connections : 20
-
- # list of policyName prefixes (filters) that DCAE-Controller handles (=ignores any other policyName values)
- scope_prefixes : ["DCAE.Config_"]
-
- # retry to getConfig from policy-engine on policy-update notification
- policy_retry_count : 5
- policy_retry_sleep : 5
-
- # policy-engine config
- # These are the url of and the auth for the external system, namely the policy-engine (PDP).
- # We obtain that info manually from PDP folks at the moment.
- # In long run we should figure out a way of bringing that info into consul record
- # related to policy-engine itself.
- policy_engine :
- url : "https://peawiv9nspd01.pedc.sbc.com:8081"
- path_pdp : "/pdp/"
- path_api : "/pdp/api/"
- headers :
- Accept : "application/json"
- "Content-Type" : "application/json"
- ClientAuth : "Basic bTAzOTQ5OnBvbGljeVIwY2sk"
- Authorization : "Basic dGVzdHBkcDphbHBoYTEyMw=="
- Environment : "TEST"
- target_entity : "policy_engine"
- # name of deployment-handler service in consul for policy-handler to direct the policy-updates to
- deploy_handler : "deployment_handler"
-EOL
-cfy install -p ../blueprints/ph/${PHBP} -b policy_handler_BP -d policy_handler -i 'policy_handler_image=nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.policy-handler:1.1-latest' -i "location_id=${LOCATION}" -i ../phinputs
-
-
-# CDAP Broker
-cfy install -p ../blueprints/cdapbroker/${CDAPBROKERBP} -b cdapbroker -d cdapbroker -i "location_id=${LOCATION}"
-
-
-
-
-
-# Keep the container up
-while true
-do
- sleep 300
-done
diff --git a/bootstrap/installer-docker.sh-template b/bootstrap/installer-docker.sh-template
index bf0f3fc..5284837 100755
--- a/bootstrap/installer-docker.sh-template
+++ b/bootstrap/installer-docker.sh-template
@@ -29,6 +29,24 @@ SSHKEYPLUG={{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }
OSPLUGINZIP=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/archive/1.4.zip
OSPLUGINWGN=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/releases/download/2.2.0/cloudify_openstack_plugin-2.2.0-py27-none-linux_x86_64-centos-Core.wgn
+PLATBPSRC={{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints
+DOCKERBP=DockerBP.yaml
+CBSBP=config_binding_service.yaml
+CDAPBP=cdapbp7.yaml
+CDAPBROKERBP=cdap_broker.yaml
+INVBP=inventory.yaml
+DHBP=DeploymentHandler.yaml
+PHBP=policy_handler.yaml
+
+DOCKERBPURL="${PLATBPSRC}/${DOCKERBP}"
+CBSBPURL="${PLATBPSRC}/${CBSBP}"
+CDAPBPURL="${PLATBPSRC}/${CDAPBP}"
+CDAPBROKERBPURL="${PLATBPSRC}/${CDAPBROKERBP}"
+INVBPURL="${PLATBPSRC}/${INVBP}"
+DHBPURL="${PLATBPSRC}/${DHBP}"
+PHBPURL="${PLATBPSRC}/${PHBP}"
+
+LOCATIONID=$(printenv LOCATION)
# Make sure ssh doesn't prompt for new host or choke on a new host with an IP it's seen before
SSHOPTS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
@@ -258,7 +276,7 @@ cfy use -t ${PUBIP}
echo "Deploying Consul VM"
set +e
-if wget -P ../blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/consul_cluster.yaml; then
+if wget -P ../blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/consul_cluster.yaml; then
echo "Succeeded in getting the newest consul_cluster.yaml"
else
echo "Failed to update consul_cluster.yaml, using default version"
@@ -320,6 +338,69 @@ scp $SSHOPTS -i ../$PVTKEY $ENVINI $SSHUSER@$PUBIP:/tmp/env.ini
ssh -t $SSHOPTS -i ../$PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/env.ini /opt/env.ini
rm $ENVINI
+
+##### INSTALLATION OF PLATFORM COMPONENTS
+
+# Get component blueprints
+wget -P ./blueprints/docker/ ${DOCKERBPURL}
+wget -P ./blueprints/cbs/ ${CBSBPURL}
+wget -P ./blueprints/cdap/ ${CDAPBPURL}
+wget -P ./blueprints/cdapbroker/ ${CDAPBROKERBPURL}
+wget -P ./blueprints/inv/ ${INVBPURL}
+wget -P ./blueprints/dh/ ${DHBPURL}
+wget -P ./blueprints/ph/ ${PHBPURL}
+
+
+# Set up the credentials for access to the Docker registry
+curl -X PUT -H "Content-Type: application/json" --data-binary '[{"username":"docker", "password":"docker", "registry": "nexus3.onap.org:10001"}]' http://${CONSULIP}:8500/v1/kv/docker_plugin/docker_logins
+
+# Install platform Docker host
+# Note we're still in the "consul" directory, which is init'ed for talking to CM
+
+set +e
+# Docker host for platform containers
+cfy install -v -p ./blueprints/docker/${DOCKERBP} -b DockerBP -d DockerPlatform -i ../${INPUTS} -i "registered_dockerhost_name=platform_dockerhost" -i "registrator_image=onapdcae/registrator:v7" -i "location_id=${LOCATION}" -i "node_name=dokp00" -i "target_datacenter=${LOCATION}"
+
+# Docker host for service containers
+cfy deployments create -b DockerBP -d DockerComponent -i ../${INPUTS} -i "registered_dockerhost_name=component_dockerhost" -i "location_id=${LOCATION}" -i "registrator_image=onapdcae/registrator:v7" -i "node_name=doks00" -i "target_datacenter=${LOCATION}"
+cfy executions start -d DockerComponent -w install
+
+# Wait for the extended platform VMs to settle
+#sleep 180
+
+
+# CDAP cluster
+#cfy install -p ./blueprints/cdap/${CDAPBP} -b cdapbp7 -d cdap7 -i ../${INPUTS} -i "location_id=${LOCATION}"
+
+
+# config binding service
+cfy install -p ./blueprints/cbs/${CBSBP} -b config_binding_service -d config_binding_service -i "location_id=${LOCATION}"
+
+
+# Inventory
+cfy install -p ./blueprints/inv/${INVBP} -b PlatformServicesInventory -d PlatformServicesInventory -i "location_id=${LOCATION}" -i ../config/invinputs.yaml
+
+
+# Deployment Handler DH
+cat >../dhinputs <<EOL
+application_config:
+ cloudify:
+ protocol: "http"
+ inventory:
+ protocol: "http"
+EOL
+cfy install -p ./blueprints/dh/${DHBP} -b DeploymentHandlerBP -d DeploymentHandler -i "location_id=${LOCATION}" -i ../dhinputs
+
+
+# Policy Handler PH
+cfy install -p ./blueprints/ph/${PHBP} -b policy_handler_BP -d policy_handler -i 'policy_handler_image=nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.policy-handler:1.1-latest' -i "location_id=${LOCATION}" -i ../config/phinputs.yaml
+
+
+# CDAP Broker
+#cfy install -p ./blueprints/cdapbroker/${CDAPBROKERBP} -b cdapbroker -d cdapbroker -i "location_id=${LOCATION}"
+
+
+# Keep the container up
while true
do
sleep 300