author | Lusheng Ji <lji@research.att.com> | 2017-09-11 20:11:29 +0000
---|---|---
committer | Lusheng Ji <lji@research.att.com> | 2017-09-11 20:11:44 +0000
commit | c38ca2f40aa0eee42fb7b2c61be4ba6e4f9aa56a |
tree | 09d8462e1a8838b9fd575e5f9b6fcbb6af94c8e1 /bootstrap |
parent | 55f488022631ce6ab14b440e747b2e9d7648b49c |
Add bootstrap
Issue-Id: DCAEGEN2-75
Change-Id: I0158762c1c3e29612381734a608fa821e903e92e
Signed-off-by: Lusheng Ji <lji@research.att.com>
Diffstat (limited to 'bootstrap')
-rw-r--r-- | bootstrap/Dockerfile-template | 19
-rw-r--r-- | bootstrap/README-docker.md | 114
-rwxr-xr-x | bootstrap/installer-docker.sh-template | 320
-rw-r--r-- | bootstrap/pom.xml | 173
4 files changed, 626 insertions, 0 deletions
diff --git a/bootstrap/Dockerfile-template b/bootstrap/Dockerfile-template
new file mode 100644
index 0000000..f15261b
--- /dev/null
+++ b/bootstrap/Dockerfile-template
@@ -0,0 +1,19 @@
+FROM ubuntu:16.04
+MAINTAINER maintainer
+ENV INSROOT /opt/app
+ENV APPUSER installer
+RUN apt-get update\
+    && apt-get install -y wget python-virtualenv python-pip ssh ed curl\
+    && apt-get clean\
+    && pip install --upgrade pip\
+    && mkdir -p ${INSROOT}/${APPUSER}/blueprints\
+    && useradd -d ${INSROOT}/${APPUSER} ${APPUSER}
+COPY installer-docker.sh ${INSROOT}/${APPUSER}/installer
+# COPY *.yaml ${INSROOT}/${APPUSER}/blueprints/
+RUN wget -P ${INSROOT}/${APPUSER}/blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/centos_vm.yaml
+RUN wget -P ${INSROOT}/${APPUSER}/blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/consul_cluster.yaml
+WORKDIR ${INSROOT}/${APPUSER}
+RUN chown -R ${APPUSER}:${APPUSER} ${INSROOT}/${APPUSER} && chmod +x ${INSROOT}/${APPUSER}/installer
+USER ${APPUSER}
+ENTRYPOINT exec "${INSROOT}/${APPUSER}/installer"
+
diff --git a/bootstrap/README-docker.md b/bootstrap/README-docker.md
new file mode 100644
index 0000000..af4edff
--- /dev/null
+++ b/bootstrap/README-docker.md
@@ -0,0 +1,114 @@
+## Dockerized bootstrap for Cloudify Manager and Consul cluster
+
+1. Preparations
+a) Add a public key to OpenStack and note its name (we will use KEYNAME as the example below). Save the private key (we will use KEYPATH as its example path) and make sure its permissions are globally readable.
+b) Load the following base VM images into OpenStack: a CentOS 7 base image and an Ubuntu 16.04 base image.
+c) Obtain from OpenStack the resource IDs/UUIDs needed by the inputs.yaml file, as explained below (a CLI sketch follows this list).
+d) DCAEGEN2 bootstrapping assumes that VMs are assigned private IP addresses from one network. Each VM can also be assigned a floating public IP address from another network.
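For step 1c, the OpenStack CLI is one way to look up the needed IDs. A minimal sketch, assuming python-openstackclient is installed and the OS_* credential variables are exported (the CLI itself is not part of this commit):
```
openstack image list            # UUIDs for centos7image_id / ubuntu1604image_id
openstack flavor list           # ID for flavor_id
openstack security group list   # UUID for security_group
openstack network list          # names for public_net / private_net
```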
+
+2. On the dev machine, edit an inputs.yaml file at INPUTSYAMLPATH
+```
+1  centos7image_id: '7c8d7524-de1f-490b-8418-db294bfa2d65'
+2  ubuntu1604image_id: '4b09c18b-d69e-4ba8-a1bd-562cab91ff20'
+3  flavor_id: '4'
+4  security_group: '55a11193-6559-4f6c-b2d2-0119a9817062'
+5  public_net: 'admin_floating_228_net'
+6  private_net: 'onap-f-net'
+7  openstack:
+8    username: 'MY_LOGIN'
+9    password: 'MY_PASSWORD'
+10   tenant_name: 'TENANT_NAME'
+11   auth_url: 'KEYSTONE_AUTH_URL'
+12   region: 'RegionOne'
+13 keypair: 'KEYNAME'
+14 key_filename: '/opt/dcae/key'
+15 location_prefix: 'onapr1'
+16 location_domain: 'onap-f.onap.homer.att.com'
+17 codesource_url: 'https://nexus01.research.att.com:8443/repository'
+18 codesource_version: 'solutioning01-mte2'
+```
+Here is a line-by-line explanation of the parameters:
+1  UUID of the OpenStack CentOS 7 VM image
+2  UUID of the OpenStack Ubuntu 16.04 VM image
+3  ID of the OpenStack VM flavor to be used by DCAEGEN2 VMs
+4  UUID of the OpenStack security group to be used for DCAEGEN2 VMs
+5  Name of the OpenStack network from which public IP addresses are allocated
+6  Name of the OpenStack network from which private IP addresses are allocated
+7  Group header for the OpenStack Keystone parameters
+8  User name
+9  Password
+10 Name of the OpenStack tenant/project where DCAEGEN2 VMs are deployed
+11 OpenStack authentication API URL, for example 'https://horizon.playground.onap.org:5000/v2.0'
+12 Name of the OpenStack region where DCAEGEN2 VMs are deployed, for example 'RegionOne'
+13 Name of the public key uploaded to OpenStack in the Preparations step
+14 Path to the private key within the container (!! Do not change !!)
+15 Prefix (location code) of all DCAEGEN2 VMs
+16 Domain name of the OpenStack tenant, for example 'onapr1.playground.onap.org'
+17 Location of the raw artifact repo hosting additional boot scripts called by the DCAEGEN2 VMs' cloud-init, for example:
+   'https://nexus.onap.org/service/local/repositories/raw/content'
+18 Path to the boot scripts within the raw artifact repo, for example: 'org.onap.dcaegen2.deployments.scripts/releases/'
+
+
+3. Pull and run the docker container
+```
+docker pull nexus3.onap.org:10003/onap/org.onap.dcaegen2.deployments.bootstrap:1.0
+
+docker run -d -v KEYPATH:/opt/app/installer/config/key -v INPUTSYAMLPATH:/opt/app/installer/config/inputs.yaml -e "LOCATION=dg2" nexus3.onap.org:10003/onap/org.onap.dcaegen2.deployments.bootstrap:1.0
+```
+
+
+### Building and running locally
+
+`expand.sh` expands the blueprints and the installer script so they
+point to the repo where the necessary artifacts (plugins, type files)
+are stored.
+
+`docker build -t bootstrap .` builds the image.
+
+`docker run -d -v /path/to/worldreadable_private_key:/opt/app/installer/config/key -v /path/to/inputs_file:/opt/app/installer/config/inputs.yaml -e "LOCATION=location_id_here" --name bsexec bootstrap` runs the container and (if you're lucky) does the deployment.
+
+Notes:
+1. The private key is THE private key for the public key added to OpenStack.
+2. The paths to the inputs and key files are FULL paths starting from /.
+3. --name is optional; if omitted, the container gets a random name.
+
+`example-inputs.yaml` is, as the name suggests, an example inputs file. The values in it work in the ONAP-Future environment, except for the
+user name and password.
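Because the mounted key must be world-readable (the installer makes its own 600-permission copy inside the container), host-side preparation looks roughly like this. A sketch only; the paths are examples, not prescribed by the README:
```
# make a world-readable COPY of the OpenStack private key for the bind mount
cp ~/.ssh/onap_key /tmp/bootstrap_key     # example source path
chmod 644 /tmp/bootstrap_key

docker run -d -v /tmp/bootstrap_key:/opt/app/installer/config/key \
       -v /tmp/inputs.yaml:/opt/app/installer/config/inputs.yaml \
       -e "LOCATION=dg2" --name bsexec bootstrap
```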
+
+To watch the action use
+`docker logs -f bsexec`
+
+The container stays up even after the installation is complete.
+To enter the running container:
+`docker exec -it bsexec /bin/bash`
+Once in the container, to uninstall CM and the host VM and its supporting entities:
+`source dcaeinstall/bin/activate`
+`cfy local uninstall`
+
+(But remember: before uninstalling CM, be sure to go to CM first and uninstall the Consul cluster. A consolidated teardown sketch follows this README.)
+
+#### TODOs:
+- Integrate with the maven-based template expansion.
+- Integrate with the maven-based Docker build and push to the LF Docker repo.
+- Add the full list of plugins to be installed onto CM.
+- Separate the Docker stuff from the non-Docker installation. (The blueprints are common to both methods.)
+- Get rid of any AT&T-isms.
+- (Maybe) Move the installation of the Cloudify CLI and the sshkeyshare and dnsdesig plugins into the Dockerfile,
+so the image has everything set up and can just enter the venv and start the CentOS VM installation.
+- Figure out what (if anything) needs to change if the container is deployed by Kubernetes rather than vanilla Docker.
+- Make sure the script never exits, even in the face of errors. We need the container to stay up so we can do uninstalls.
+- Figure out how to add in the deployments for the rest of the DCAE platform components. (If this container deploys all of DCAE,
+should it move out of the CCSDK project and into DCAE?)
+- Figure out the right way to get the Cloudify OpenStack plugins and the Cloudify Fabric plugins onto CM. Right now there are
+handbuilt wagons in the Nexus repo. (In theory, CM should be able to install these plugins whenever a blueprint calls for them. However,
+they require gcc, and we're not installing gcc on our CM host.)
+- Maybe look at using a different base image; the Ubuntu 16.04 image needs numerous extra packages installed.
+- The blueprint for Consul shows up in Cloudify Manager with the name 'blueprints'. I'll leave it as an exercise for the reader to figure out why
+and how to change it. (See ~ line 248 of installer-docker.sh-template.)
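A consolidated teardown sketch, assuming the container was started with `--name bsexec` and using the `consul` deployment id set by the installer script below (the `cfy executions start` form is the Cloudify 3.4 CLI syntax this image installs):
```
docker exec -it bsexec /bin/bash             # enter the running container
source dcaeinstall/bin/activate              # enter the installer's virtualenv
(cd consul && cfy executions start -w uninstall -d consul)   # remove the Consul cluster via CM first
cfy local uninstall                          # then tear down the CM host VM and supporting entities
```
The `cd consul` matters because that directory holds the CLI context created by `cfy use -t $PUBIP`, so the uninstall is sent to Cloudify Manager rather than run locally.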
diff --git a/bootstrap/installer-docker.sh-template b/bootstrap/installer-docker.sh-template
new file mode 100755
index 0000000..672f397
--- /dev/null
+++ b/bootstrap/installer-docker.sh-template
@@ -0,0 +1,320 @@
+#!/bin/bash
+#
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+#
+# ECOMP and OpenECOMP are trademarks
+# and service marks of AT&T Intellectual Property.
+#
+
+# URLs for artifacts needed for installation
+DESIGTYPES={{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/type_files/dnsdesig/dns_types.yaml
+DESIGPLUG={{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/plugins/dnsdesig-1.0.0-py27-none-any.wgn
+SSHKEYTYPES={{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/type_files/sshkeyshare/sshkey_types.yaml
+SSHKEYPLUG={{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/plugins/sshkeyshare-1.0.0-py27-none-any.wgn
+OSPLUGINZIP=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/archive/1.4.zip
+
+# Make sure ssh doesn't prompt for a new host or choke on a new host with an IP it's seen before
+SSHOPTS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+STARTDIR=$(pwd)
+
+SSHUSER=centos
+PVTKEY=./config/key
+INPUTS=./config/inputs.yaml
+
+if [ "$LOCATION" = "" ]
+then
+  echo 'Environment variable LOCATION not set. Should be set to location ID for this installation.'
+  exit 1
+fi
+
+set -e
+set -x
+
+# Docker workaround for the SSH key
+# In order for the container to be able to access the key when it's mounted from the Docker host,
+# the key file has to be world-readable. But ssh itself will not work with a private key that's world-readable.
+# So we make a copy and change permissions on the copy.
+# NB -- the key on the Docker host has to be world-readable, which means that, from the host machine, you
+# can't use it with ssh. It needs to be a world-readable COPY.
+PVTKEY=./key600
+cp ./config/key ${PVTKEY}
+chmod 600 ${PVTKEY}
+
+# Create a virtual environment
+virtualenv dcaeinstall
+source dcaeinstall/bin/activate
+
+# Install Cloudify
+pip install cloudify==3.4.0
+
+# Install the Cloudify OpenStack plugin
+wget -qO- ${OSPLUGINZIP} > openstack.zip
+pip install openstack.zip
+
+# Spin up a VM
+
+# Get the Designate and SSH key type files and plugins
+mkdir types
+wget -qO- ${DESIGTYPES} > types/dns_types.yaml
+wget -qO- ${SSHKEYTYPES} > types/sshkey_types.yaml
+
+wget -O dnsdesig.wgn ${DESIGPLUG}
+wget -O sshkeyshare.wgn ${SSHKEYPLUG}
+
+wagon install -s dnsdesig.wgn
+wagon install -s sshkeyshare.wgn
+
+## Fix up the inputs file to point to the local copy of the private key
+sed -e "s#key_filename:.*#key_filename: $PVTKEY#" < ${INPUTS} > /tmp/local_inputs
+
+# Now install the VM
+# Don't exit on error after this point--keep the container running so we can do uninstalls after a failure
+set +e
+if wget -P ./blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/centos_vm.yaml; then
+  echo "Succeeded in getting the newest centos_vm.yaml"
+else
+  echo "Failed to update centos_vm.yaml, using default version"
+fi
+set -e
+cfy local init --install-plugins -p ./blueprints/centos_vm.yaml -i /tmp/local_inputs -i "datacenter=$LOCATION"
+cfy local execute -w install --task-retries=10
+PUBIP=$(cfy local outputs | grep -Po '"public_ip": "\K.*?(?=")')
+
+## The VM is probably not completely ready when the installation finishes, so wait
+sleep 180
+
+echo "Installing Cloudify Manager on ${PUBIP}."
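+
+# Learn the new VM's private IP by querying the link-local metadata service
+# (169.254.169.254) from inside the VM over ssh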
+PVTIP=$(ssh $SSHOPTS -i "$PVTKEY" "$SSHUSER"@"$PUBIP" 'echo PVTIP=`curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4`' | grep PVTIP | sed 's/PVTIP=//')
+if [ "$PVTIP" = "" ]
+then
+  echo Cannot access specified machine at $PUBIP using supplied credentials
+  # Don't exit--keep the container up so we can uninstall the VM and supporting entities
+  while true
+  do
+    sleep 300
+  done
+fi
+
+# Copy the private key onto the Cloudify Manager VM
+PVTKEYPATH=$(cat ${INPUTS} | grep "key_filename" | cut -d "'" -f2)
+PVTKEYNAME=$(basename $PVTKEYPATH)
+PVTKEYDIR=$(dirname $PVTKEYPATH)
+scp $SSHOPTS -i $PVTKEY $PVTKEY $SSHUSER@$PUBIP:/tmp/$PVTKEYNAME
+ssh -t $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mkdir -p $PVTKEYDIR
+ssh -t $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/$PVTKEYNAME $PVTKEYPATH
+
+ESMAGIC=$(uuidgen -r)
+WORKDIR=$HOME/cmtmp
+BSDIR=$WORKDIR/cmbootstrap
+PVTKEY2=$BSDIR/id_rsa.cfybootstrap
+TMPBASE=$WORKDIR/tmp
+TMPDIR=$TMPBASE/lib
+SRCS=$WORKDIR/srcs.tar
+TOOL=$WORKDIR/tool.py
+rm -rf $WORKDIR
+mkdir -p $BSDIR $TMPDIR/cloudify/wheels $TMPDIR/cloudify/sources $TMPDIR/manager
+chmod 700 $WORKDIR
+cp "$PVTKEY" $PVTKEY2
+cat >$TOOL <<!EOF
+#!/usr/local/bin/python
+# Print the manager_resources_package URL, falling back to the blueprint's
+# input defaults for any input not set in the inputs file
+import yaml
+import sys
+bsdir = sys.argv[1]
+with open(bsdir + '/simple-manager-blueprint-inputs.yaml', 'r') as f:
+    inpyaml = yaml.load(f)
+with open(bsdir + '/simple-manager-blueprint.yaml', 'r') as f:
+    bpyaml = yaml.load(f)
+for param, value in bpyaml['inputs'].items():
+    if value.has_key('default') and not inpyaml.has_key(param):
+        inpyaml[param] = value['default']
+print inpyaml['manager_resources_package']
+!EOF
+
+#
+# Try to disable the attempt to download virtualenv when not needed
+#
+ssh $SSHOPTS -t -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "echo y; mkdir -p /root/.virtualenv; echo '"'"'[virtualenv]'"'"' >/root/.virtualenv/virtualenv.ini; echo no-download=true >>/root/.virtualenv/virtualenv.ini"'
+
+# Gather installation artifacts
+# From documentation, the URL for the manager blueprints archive
+BSURL=https://github.com/cloudify-cosmo/cloudify-manager-blueprints/archive/3.4.tar.gz
+BSFILE=$(basename $BSURL)
+
+umask 022
+wget -qO- $BSURL >$BSDIR/$BSFILE
+cd $BSDIR
+tar xzvf $BSFILE
+MRPURL=$(python $TOOL $BSDIR/cloudify-manager-blueprints-3.4)
+MRPFILE=$(basename $MRPURL)
+wget -qO- $MRPURL >$TMPDIR/cloudify/sources/$MRPFILE
+
+tar cf $SRCS -C $TMPDIR cloudify
+rm -rf $TMPBASE
+#
+# Load required package files onto the VM
+#
+scp $SSHOPTS -i $PVTKEY2 $SRCS $SSHUSER@$PUBIP:/tmp/.
+ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "cd /opt; tar xf /tmp/srcs.tar; chown -R root:root /opt/cloudify /opt/manager; rm -rf /tmp/srcs.tar"'
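+
+# The manager resources package is now staged under /opt/cloudify/sources on the
+# VM; the bootstrap inputs below point manager_resources_package at an unreachable
+# URL, presumably so the pre-staged copy (not a fresh download) is what gets used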
+#
+# Install the config file -- this was done by the DCAE controller. What now?
+#
+ssh $SSHOPTS -t -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc '"'"'mkdir -p /opt/dcae; if [ -f /tmp/cfy-config.txt ]; then cp /tmp/cfy-config.txt /opt/dcae/config.txt && chmod 644 /opt/dcae/config.txt; fi'"'"
+cd $WORKDIR
+
+#
+# Check for and set up https certificate information
+#
+rm -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt
+ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "openssl pkcs12 -in /opt/app/dcae-certificate/certificate.pkcs12 -passin file:/opt/app/dcae-certificate/.password -nodes -chain"' | awk 'BEGIN{x="/dev/null";}/-----BEGIN CERTIFICATE-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.crt";}/-----BEGIN PRIVATE KEY-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.key";}{print >x;}/-----END /{x="/dev/null";}'
+USESSL=false
+if [ -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key -a -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt ]
+then
+  USESSL=true
+fi
+#
+# Set up configuration for the bootstrap
+#
+export CLOUDIFY_USERNAME=admin CLOUDIFY_PASSWORD=encc0fba9f6d618a1a51935b42342b17658
+cd $BSDIR/cloudify-manager-blueprints-3.4
+cp simple-manager-blueprint.yaml bootstrap-blueprint.yaml
+ed bootstrap-blueprint.yaml <<'!EOF'
+/^node_types:/-1a
+  plugin_resources:
+    description: >
+      Holds any archives that should be uploaded to the manager.
+    default: []
+  dsl_resources:
+    description: >
+      Holds a set of dsl required resources
+    default: []
+.
+/^  upload_resources:/a
+    plugin_resources: { get_input: plugin_resources }
+.
+w
+q
+!EOF
+
+sed <simple-manager-blueprint-inputs.yaml >bootstrap-inputs.yaml \
+  -e "s;.*public_ip: .*;public_ip: '$PUBIP';" \
+  -e "s;.*private_ip: .*;private_ip: '$PVTIP';" \
+  -e "s;.*ssh_user: .*;ssh_user: '$SSHUSER';" \
+  -e "s;.*ssh_key_filename: .*;ssh_key_filename: '$PVTKEY2';" \
+  -e "s;.*elasticsearch_java_opts: .*;elasticsearch_java_opts: '-Des.cluster.name=$ESMAGIC';" \
+  -e "/ssl_enabled: /s/.*/ssl_enabled: $USESSL/" \
+  -e "/security_enabled: /s/.*/security_enabled: $USESSL/" \
+  -e "/admin_password: /s/.*/admin_password: '$CLOUDIFY_PASSWORD'/" \
+  -e "/admin_username: /s/.*/admin_username: '$CLOUDIFY_USERNAME'/" \
+  -e "s;.*manager_resources_package: .*;manager_resources_package: 'http://169.254.169.254/nosuchthing/$MRPFILE';" \
+  -e "s;.*ignore_bootstrap_validations: .*;ignore_bootstrap_validations: true;"
+
+# Add plugin resources
+# TODO Add the other plugins when they're available
+cat >>bootstrap-inputs.yaml <<'!EOF'
+plugin_resources:
+  - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-openstack-plugin/1.4/cloudify_openstack_plugin-1.4-py27-none-linux_x86_64-centos-Core.wgn'
+  - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-fabric-plugin/1.4.1/cloudify_fabric_plugin-1.4.1-py27-none-linux_x86_64-centos-Core.wgn'
+  - '{{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/plugins/dnsdesig-1.0.0-py27-none-any.wgn'
+  - '{{ ONAPTEMPLATE_RAWREPOURL_org_onap_ccsdk_platform_plugins_releases }}/plugins/sshkeyshare-1.0.0-py27-none-any.wgn'
+!EOF
+#
+# And away we go
+#
+cfy init -r
+cfy bootstrap --install-plugins -p bootstrap-blueprint.yaml -i bootstrap-inputs.yaml
+rm -f resources/ssl/server.key
+
+# Install the Consul VM via a blueprint
+cd $STARTDIR
+mkdir consul
+cd consul
+cfy init -r
+cfy use -t ${PUBIP}
+echo "Deploying Consul VM"
+
+set +e
+if wget -P ../blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/consul_cluster.yaml; then
+  echo "Succeeded in getting the newest consul_cluster.yaml"
+else
+  echo "Failed to update consul_cluster.yaml, using default version"
+fi
+set -e
+cfy install -p ../blueprints/consul_cluster.yaml -d consul -i ../${INPUTS} -i "datacenter=$LOCATION"
+
+# Get the floating IP for one member of the cluster
+# Needed for instructing the Consul agent on the CM host to join the cluster
+CONSULIP=$(cfy deployments outputs -d consul | grep -Po 'Value: \K.*')
+echo Consul deployed at $CONSULIP
+
+# Wait for the Consul API to come up
+until curl http://$CONSULIP:8500/v1/agent/services
+do
+  echo Waiting for Consul API
+  sleep 60
+done
+
+# Wait for a leader to be elected
+until [[ "$(curl -Ss http://$CONSULIP:8500/v1/status/leader)" != '""' ]]
+do
+  echo Waiting for leader
+  sleep 30
+done
+
+# Instruct the client-mode Consul agent running on the CM to join the cluster
+curl http://$PUBIP:8500/v1/agent/join/$CONSULIP
+
+# Register Cloudify Manager in Consul via the local agent on the CM host
+REGREQ="
+{
+  \"Name\" : \"cloudify_manager\",
+  \"ID\" : \"cloudify_manager\",
+  \"Tags\" : [\"http://${PUBIP}/api/v2.1\"],
+  \"Address\": \"${PUBIP}\",
+  \"Port\": 80,
+  \"Check\" : {
+    \"Name\" : \"cloudify_manager_health\",
+    \"Interval\" : \"300s\",
+    \"HTTP\" : \"http://${PUBIP}/api/v2.1/status\",
+    \"Status\" : \"passing\",
+    \"DeregisterCriticalServiceAfter\" : \"30m\"
+  }
+}
+"
+
+curl -X PUT -H 'Content-Type: application/json' --data-binary "$REGREQ" http://$PUBIP:8500/v1/agent/service/register
+
+# Make the Consul address available to plugins on Cloudify Manager
+# TODO probably not necessary anymore
+ENVINI=$(mktemp)
+cat <<!EOF > $ENVINI
+[$LOCATION]
+CONSUL_HOST=$CONSULIP
+CONFIG_BINDING_SERVICE=config_binding_service
+!EOF
+scp $SSHOPTS -i ../$PVTKEY $ENVINI $SSHUSER@$PUBIP:/tmp/env.ini
+ssh -t $SSHOPTS -i ../$PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/env.ini /opt/env.ini
+rm $ENVINI
+
+# Never exit -- keep the container up so uninstalls can be run later
+while true
+do
+  sleep 300
+done
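Once the script reaches its final wait loop, the registration it performed can be checked from any host that can reach the manager. A sketch, where `$PUBIP` is the floating IP echoed during the install; both endpoints are standard Consul HTTP API calls:
```
curl http://$PUBIP:8500/v1/agent/services                    # should list cloudify_manager
curl http://$PUBIP:8500/v1/health/service/cloudify_manager   # health of the registered service
```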
diff --git a/bootstrap/pom.xml b/bootstrap/pom.xml
new file mode 100644
index 0000000..06f6977
--- /dev/null
+++ b/bootstrap/pom.xml
@@ -0,0 +1,173 @@
+<?xml version="1.0"?>
+<!--
+================================================================================
+Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+
+ECOMP is a trademark and service mark of AT&T Intellectual Property.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.onap.dcaegen2</groupId>
+    <artifactId>deployments</artifactId>
+    <version>1.0.0-SNAPSHOT</version>
+  </parent>
+  <groupId>org.onap.dcaegen2.deployments</groupId>
+  <artifactId>bootstrap</artifactId>
+  <name>bootstrap</name>
+  <version>1.0.0-SNAPSHOT</version>
+  <url>http://maven.apache.org</url>
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <sonar.skip>true</sonar.skip>
+    <sonar.sources>.</sonar.sources>
+    <!-- customize the SONARQUBE URL -->
+    <!-- sonar.host.url>http://localhost:9000</sonar.host.url -->
+    <!-- below are language dependent -->
+    <!-- for Python -->
+    <sonar.language>py</sonar.language>
+    <sonar.pluginName>Python</sonar.pluginName>
+    <sonar.inclusions>**/*.py</sonar.inclusions>
+    <!-- for JavaScript -->
+    <!--
+    <sonar.language>js</sonar.language>
+    <sonar.pluginName>JS</sonar.pluginName>
+    <sonar.inclusions>**/*.js</sonar.inclusions>
+    -->
+  </properties>
+  <build>
+    <finalName>${project.artifactId}-${project.version}</finalName>
+    <plugins>
+      <!-- plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <version>2.4.1</version>
+        <configuration>
+          <descriptors>
+            <descriptor>assembly/dep.xml</descriptor>
+          </descriptors>
+        </configuration>
+        <executions>
+          <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin -->
+      <!-- now we configure custom actions (calling a script) at various lifecycle phases -->
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.2.1</version>
+        <executions>
+          <execution>
+            <id>clean phase script</id>
+            <phase>clean</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>clean</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>generate-sources script</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>generate-sources</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>compile script</id>
+            <phase>compile</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>compile</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>package script</id>
+            <phase>package</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>package</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>test script</id>
+            <phase>test</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>test</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>install script</id>
+            <phase>install</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>install</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>deploy script</id>
+            <phase>deploy</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>deploy</argument>
+              </arguments>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
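Each exec-maven-plugin execution above passes `${project.artifactId}` and the lifecycle phase name to an executable, presumably configured in the parent pom (which is not part of this diff). A hypothetical sketch of such a phase script, only to illustrate the calling convention; the name and actions are assumptions, not part of this commit:
```
#!/bin/bash
# hypothetical phase script; invoked by exec-maven-plugin as: <script> <artifactId> <phase>
MODULE="$1"
PHASE="$2"

case "$PHASE" in
  clean)   rm -rf target ;;                       # hypothetical clean action
  package) echo "packaging $MODULE" ;;            # e.g. template expansion / docker build
  *)       echo "$MODULE: no-op for $PHASE" ;;
esac
```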