From d2c821277a10f0c746ddb1a99c59a3ef88fb2f1c Mon Sep 17 00:00:00 2001 From: Vijay Venkatesh Kumar Date: Fri, 5 Apr 2019 00:38:59 +0000 Subject: Bulk update to deployment 1) Removed pnda folder (moved to pnda repo) 2) Moved older R3 - heat repo into archive folder 3) Added Dmaap plugin 4) Added new blueprints for Dashboard/helm Change-Id: I82cb8c482a0a35fe8094da825e7403b0fc4ee33b Signed-off-by: Vijay Venkatesh Kumar Issue-ID: DCAEGEN2-1270 Signed-off-by: Vijay Venkatesh Kumar --- archive/bootstrap/Dockerfile | 20 + archive/bootstrap/Dockerfile-template | 20 + archive/bootstrap/README-docker.md | 150 ++++++ archive/bootstrap/installer-docker.sh | 470 +++++++++++++++++++ archive/bootstrap/installer-docker.sh-template | 470 +++++++++++++++++++ archive/bootstrap/pom.xml | 173 +++++++ archive/bootstrap/teardown.sh | 50 ++ archive/cloud_init/cdap-init.sh | 387 ++++++++++++++++ archive/cloud_init/instconsulagentub16.sh | 51 +++ archive/cloud_init/pom.xml | 173 +++++++ archive/heat/build-plugins.sh | 77 ++++ archive/heat/docker-compose-1.yaml | 82 ++++ archive/heat/docker-compose-2.yaml | 99 ++++ archive/heat/docker-compose-3.yaml | 70 +++ archive/heat/docker-compose-4.yaml | 167 +++++++ archive/heat/pom.xml | 158 +++++++ archive/heat/pullall.sh | 40 ++ archive/heat/register.sh | 605 +++++++++++++++++++++++++ archive/heat/setup.sh | 142 ++++++ archive/heat/teardown.sh | 35 ++ bootstrap/Dockerfile-template | 20 - bootstrap/README-docker.md | 150 ------ bootstrap/installer-docker.sh-template | 470 ------------------- bootstrap/pom.xml | 173 ------- bootstrap/teardown.sh | 50 -- cloud_init/cdap-init.sh | 387 ---------------- cloud_init/instconsulagentub16.sh | 51 --- cloud_init/pom.xml | 173 ------- cm-container/Dockerfile | 50 ++ cm-container/scripts/get-type-files.sh | 3 +- healthcheck-container/healthcheck.js | 4 +- heat/docker-compose-1.yaml | 82 ---- heat/docker-compose-2.yaml | 99 ---- heat/docker-compose-3.yaml | 70 --- heat/docker-compose-4.yaml | 167 ------- heat/pom.xml | 158 ------- heat/pullall.sh | 40 -- heat/register.sh | 605 ------------------------- heat/setup.sh | 142 ------ heat/teardown.sh | 35 -- k8s-bootstrap-container/bootstrap.sh | 1 + k8s-bootstrap-container/build-plugins.sh | 1 + k8s-bootstrap-container/load-blueprints.sh | 4 +- k8s-bootstrap-container/pom.xml | 2 +- pnda-bootstrap-container/Dockerfile | 34 -- pnda-bootstrap-container/README.md | 8 - pnda-bootstrap-container/pom.xml | 144 ------ pnda-mirror-container/Dockerfile | 47 -- pnda-mirror-container/README.md | 23 - pnda-mirror-container/pnda-5.0-maint.patch | 13 - pnda-mirror-container/pom.xml | 144 ------ pom.xml | 4 +- settings.xml | 206 +++++++++ tls-init-container/Dockerfile | 23 + 54 files changed, 3730 insertions(+), 3292 deletions(-) create mode 100644 archive/bootstrap/Dockerfile create mode 100644 archive/bootstrap/Dockerfile-template create mode 100644 archive/bootstrap/README-docker.md create mode 100755 archive/bootstrap/installer-docker.sh create mode 100755 archive/bootstrap/installer-docker.sh-template create mode 100644 archive/bootstrap/pom.xml create mode 100755 archive/bootstrap/teardown.sh create mode 100644 archive/cloud_init/cdap-init.sh create mode 100644 archive/cloud_init/instconsulagentub16.sh create mode 100644 archive/cloud_init/pom.xml create mode 100755 archive/heat/build-plugins.sh create mode 100644 archive/heat/docker-compose-1.yaml create mode 100644 archive/heat/docker-compose-2.yaml create mode 100644 archive/heat/docker-compose-3.yaml create mode 100644 
archive/heat/docker-compose-4.yaml create mode 100644 archive/heat/pom.xml create mode 100755 archive/heat/pullall.sh create mode 100755 archive/heat/register.sh create mode 100755 archive/heat/setup.sh create mode 100755 archive/heat/teardown.sh delete mode 100644 bootstrap/Dockerfile-template delete mode 100644 bootstrap/README-docker.md delete mode 100755 bootstrap/installer-docker.sh-template delete mode 100644 bootstrap/pom.xml delete mode 100755 bootstrap/teardown.sh delete mode 100644 cloud_init/cdap-init.sh delete mode 100644 cloud_init/instconsulagentub16.sh delete mode 100644 cloud_init/pom.xml create mode 100644 cm-container/Dockerfile delete mode 100644 heat/docker-compose-1.yaml delete mode 100644 heat/docker-compose-2.yaml delete mode 100644 heat/docker-compose-3.yaml delete mode 100644 heat/docker-compose-4.yaml delete mode 100644 heat/pom.xml delete mode 100755 heat/pullall.sh delete mode 100755 heat/register.sh delete mode 100755 heat/setup.sh delete mode 100755 heat/teardown.sh delete mode 100644 pnda-bootstrap-container/Dockerfile delete mode 100644 pnda-bootstrap-container/README.md delete mode 100644 pnda-bootstrap-container/pom.xml delete mode 100644 pnda-mirror-container/Dockerfile delete mode 100644 pnda-mirror-container/README.md delete mode 100644 pnda-mirror-container/pnda-5.0-maint.patch delete mode 100644 pnda-mirror-container/pom.xml create mode 100644 settings.xml create mode 100644 tls-init-container/Dockerfile diff --git a/archive/bootstrap/Dockerfile b/archive/bootstrap/Dockerfile new file mode 100644 index 0000000..2f93801 --- /dev/null +++ b/archive/bootstrap/Dockerfile @@ -0,0 +1,20 @@ +FROM ubuntu:16.04 +MAINTAINER maintainer +ENV INSROOT /opt/app +ENV APPUSER installer +RUN apt-get update\ + && apt-get install -y iputils-ping wget python-virtualenv python-pip ssh ed curl uuid-runtime netcat\ + && apt-get clean\ + && pip install --upgrade pip\ + && mkdir -p ${INSROOT}/${APPUSER}/blueprints\ + && useradd -d ${INSROOT}/${APPUSER} ${APPUSER} +COPY installer-docker.sh ${INSROOT}/${APPUSER}/installer +COPY teardown.sh ${INSROOT}/${APPUSER}/teardown +# COPY *.yaml ${INSROOT}/${APPUSER}/blueprints/ +RUN wget -P ${INSROOT}/${APPUSER}/blueprints/ https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.blueprints/R3/blueprints/centos_vm.yaml +RUN wget -P ${INSROOT}/${APPUSER}/blueprints/ https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.blueprints/R3/blueprints/consul_cluster.yaml +WORKDIR ${INSROOT}/${APPUSER} +RUN chown -R ${APPUSER}:${APPUSER} ${INSROOT}/${APPUSER} && chmod +x ${INSROOT}/${APPUSER}/installer && chmod +x ${INSROOT}/${APPUSER}/teardown +USER ${APPUSER} +ENTRYPOINT exec "${INSROOT}/${APPUSER}/installer" + diff --git a/archive/bootstrap/Dockerfile-template b/archive/bootstrap/Dockerfile-template new file mode 100644 index 0000000..531939a --- /dev/null +++ b/archive/bootstrap/Dockerfile-template @@ -0,0 +1,20 @@ +FROM ubuntu:16.04 +MAINTAINER maintainer +ENV INSROOT /opt/app +ENV APPUSER installer +RUN apt-get update\ + && apt-get install -y iputils-ping wget python-virtualenv python-pip ssh ed curl uuid-runtime netcat\ + && apt-get clean\ + && pip install --upgrade pip\ + && mkdir -p ${INSROOT}/${APPUSER}/blueprints\ + && useradd -d ${INSROOT}/${APPUSER} ${APPUSER} +COPY installer-docker.sh ${INSROOT}/${APPUSER}/installer +COPY teardown.sh ${INSROOT}/${APPUSER}/teardown +# COPY *.yaml ${INSROOT}/${APPUSER}/blueprints/ +RUN wget -P ${INSROOT}/${APPUSER}/blueprints/ {{ 
ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/centos_vm.yaml
+RUN wget -P ${INSROOT}/${APPUSER}/blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/consul_cluster.yaml
+WORKDIR ${INSROOT}/${APPUSER}
+RUN chown -R ${APPUSER}:${APPUSER} ${INSROOT}/${APPUSER} && chmod +x ${INSROOT}/${APPUSER}/installer && chmod +x ${INSROOT}/${APPUSER}/teardown
+USER ${APPUSER}
+ENTRYPOINT exec "${INSROOT}/${APPUSER}/installer"
+
diff --git a/archive/bootstrap/README-docker.md b/archive/bootstrap/README-docker.md
new file mode 100644
index 0000000..7e3dedc
--- /dev/null
+++ b/archive/bootstrap/README-docker.md
@@ -0,0 +1,150 @@
+## Dockerized bootstrap for Cloudify Manager and Consul cluster
+1. Preparations
+
+   a) The current DCAEGEN2 bootstrapping process assumes that networking in OpenStack is based on the following model:
+
+   a private network interconnecting the VMs, and an external network that provides "floating" IP addresses for the VMs. A router connects the two networks. Each VM is assigned two IP addresses: one allocated from the private network when the VM is launched, and a floating IP assigned to the VM from the external network afterwards. The UUIDs of the private and external networks are needed for preparing the inputs.yaml file used when running the bootstrap container.
+
+   b) Add a public key to OpenStack and note its name (KEYNAME is used as the example below). Save the private key (KEYPATH is used as its example path) and make sure it is globally readable.
+
+   c) Load the following base VM images into OpenStack: a CentOS 7 base image and an Ubuntu 16.04 base image.
+
+   d) Obtain from OpenStack the IDs/UUIDs of the resources needed by the inputs.yaml file, as explained below.
+
+2. On the dev machine, set up a directory to hold environment-specific configuration files. Call its path CONFIGDIR.
+
+3. Put the private key mentioned above into CONFIGDIR as a file named `key`, and make it globally readable.
+4. Create a file named `inputs.yaml` in CONFIGDIR:
+
+```
+1 centos7image_id: '7c8d7524-de1f-490b-8418-db294bfa2d65'
+2 ubuntu1604image_id: '4b09c18b-d69e-4ba8-a1bd-562cab91ff20'
+3 flavor_id: '4'
+4 security_group: '55a11193-6559-4f6c-b2d2-0119a9817062'
+5 public_net: 'admin_floating_228_net'
+6 private_net: 'onap-f-net'
+7 openstack:
+8   username: 'MY_LOGIN'
+9   password: 'MY_PASSWORD'
+10   tenant_name: 'TENANT_NAME'
+11   auth_url: 'KEYSTONE_AUTH_URL'
+12   region: 'RegionOne'
+13 keypair: 'KEYNAME'
+14 key_filename: '/opt/dcae/key'
+15 location_prefix: 'onapr1'
+16 location_domain: 'onapdevlab.onap.org'
+17 codesource_url: 'https://nexus.onap.org/service/local/repositories/raw/content'
+18 codesource_version: 'org.onap.dcaegen2.deployments/releases/scripts'
+```
+Here is a line-by-line explanation of the parameters:
+  1. UUID of the OpenStack CentOS 7 VM image
+  2. UUID of the OpenStack Ubuntu 16.04 VM image
+  3. ID of the OpenStack VM flavor to be used by DCAEGEN2 VMs
+  4. UUID of the OpenStack security group to be used for DCAEGEN2 VMs
+  5. The name of the OpenStack network where public IP addresses are allocated from
+  6. The name of the OpenStack network where private IP addresses are allocated from
+  7. Group header for OpenStack Keystone parameters
+  8. User name
+  9. Password
+  10. Name of the OpenStack tenant/project where DCAEGEN2 VMs are deployed
+  11. OpenStack authentication API URL, for example 'https://horizon.playground.onap.org:5000/v2.0'
+  12. Name of the OpenStack region where DCAEGEN2 VMs are deployed, for example 'RegionOne'
+  13. Name of the public key uploaded to OpenStack in the Preparation step
+  14. Path to the private key within the container (!! Do not change !!)
+  15. Prefix (location code) of all DCAEGEN2 VMs
+  16. Domain name of the OpenStack tenant, for example 'onapr1.playground.onap.org'
+  17. Location of the raw artifact repo hosting additional boot scripts called by DCAEGEN2 VMs' cloud-init, for example:
+      'https://nexus.onap.org/service/local/repositories/raw/content'
+  18. Path to the boot scripts within the raw artifact repo, for example: 'org.onap.dcaegen2.deployments/releases/scripts'
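+
+The UUIDs and names above can be looked up with the OpenStack CLI. The commands below are illustrative only (they assume `python-openstackclient` is installed on the dev machine and the usual `OS_*` credential variables are exported):
+
+```
+# image UUIDs for the CentOS 7 and Ubuntu 16.04 base images (lines 1-2)
+openstack image list
+# flavor ID (line 3) and security group UUID (line 4)
+openstack flavor list
+openstack security group list
+# public and private network names (lines 5-6)
+openstack network list
+```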
+
+5. Create a file in CONFIGDIR called `invinputs.yaml`. This contains environment-specific information for the inventory service. (TODO: examples only, not the correct values for the ONAP integration environment.)
+
+```
+1 docker_host_override: "platform_dockerhost"
+2 asdc_address: "sdc.onap.org:8443"
+3 asdc_uri: "https://sdc.onap.org:8443"
+4 asdc_user: "ci"
+5 asdc_password: !!str 123456
+6 asdc_environment_name: "ONAP-AMDOCS"
+7 postgres_user_inventory: "postgres"
+8 postgres_password_inventory: "onap123"
+9 service_change_handler_image: "nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.servicechange-handler:latest"
+10 inventory_image: "nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.inventory-api:latest"
+```
+Here is a line-by-line description of the parameters:
+  1. The service name for the platform docker host (should be the same in all environments)
+  2. The hostname and port of the SDC service
+  3. The URI of the SDC service
+  4. The SDC username
+  5. The SDC password
+  6. The SDC environment name
+  7. The postgres user name
+  8. The postgres password
+  9. The Docker image to be used for the service change handler (should be the same in all environments)
+  10. The Docker image to be used for the inventory service (should be the same in all environments)
+
+6. Create a file in CONFIGDIR called `phinputs.yaml`. This contains environment-specific information for the policy handler.
+
+```
+application_config:
+  policy_handler :
+    # parallelize the getConfig queries to policy-engine on each policy-update notification
+    thread_pool_size : 4
+
+    # parallelize requests to policy-engine and keep them alive
+    pool_connections : 20
+
+    # retry to getConfig from policy-engine on policy-update notification
+    policy_retry_count : 5
+    policy_retry_sleep : 5
+
+    # policy-engine config
+    # These are the URL of, and the auth for, the external system, namely the policy-engine (PDP).
+    # We obtain that info manually from PDP folks at the moment.
+    # In the long run we should figure out a way of bringing that info into the consul record
+    # related to the policy-engine itself.
+    policy_engine :
+      url : "https://policy-engine.onap.org:8081"
+      path_decision : "/decision/v1"
+      path_pdp : "/pdp/"
+      path_api : "/pdp/api/"
+      headers :
+        Accept : "application/json"
+        "Content-Type" : "application/json"
+        ClientAuth : "Basic bTAzOTQ5OnBvbGljeVIwY2sk"
+        Authorization : "Basic dGVzdHBkcDphbHBoYTEyMw=="
+        Environment : "TEST"
+      target_entity : "policy_engine"
+    # deploy_handler config
+    #     changed from string "deployment_handler" in 2.3.1 to structure in 2.4.0
+    deploy_handler :
+      # name of deployment-handler service used by policy-handler for logging
+      target_entity : "deployment_handler"
+      # url of the deployment-handler service for policy-handler to direct the policy-updates to
+      #   - expecting dns to resolve the hostname deployment-handler to ip address
+      url : "http://deployment-handler:8188"
+      # limit the size of a single data segment for policy-update messages
+      #     from policy-handler to deployment-handler in megabytes
+      max_msg_length_mb : 5
+      query :
+        # optionally specify the tenant name for the cloudify under deployment-handler
+        # if not specified the "default_tenant" is used by the deployment-handler
+        cfy_tenant_name : "default_tenant"
+```
+TODO: provide explanations
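+
+Before launching the bootstrap container, it is worth confirming that the three files above parse as valid YAML. A minimal check (illustrative only; assumes a Python interpreter with PyYAML on the dev machine, and CONFIGDIR replaced by the actual path):
+
+```
+for f in CONFIGDIR/inputs.yaml CONFIGDIR/invinputs.yaml CONFIGDIR/phinputs.yaml
+do
+    python -c "import yaml, sys; yaml.safe_load(open(sys.argv[1]))" "$f" \
+        && echo "$f parses OK" \
+        || echo "$f is not valid YAML"
+done
+```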
+
+7. Pull and run the docker container
+```
+docker login -u docker -p docker nexus3.onap.org:10001
+docker pull nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.bootstrap:1.1-latest
+docker run -d --name boot -v CONFIGDIR:/opt/app/installer/config -e "LOCATION=dg2" nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.bootstrap:1.1-latest
+```
+The container stays up even after the installation is complete. Use the `docker exec` command to get inside the container, then run `cfy` commands to interact with the Cloudify Manager.
+
+8. To tear down all of the DCAE installation:
+
+```
+docker exec -it boot ./teardown
+```
diff --git a/archive/bootstrap/installer-docker.sh b/archive/bootstrap/installer-docker.sh
new file mode 100755
index 0000000..4f889fc
--- /dev/null
+++ b/archive/bootstrap/installer-docker.sh
@@ -0,0 +1,470 @@
+#!/bin/bash
+#
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================ +# + +# URLs for artifacts needed for installation +DESIGTYPES=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/type_files/dnsdesig/dns_types.yaml +DESIGPLUG=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/dnsdesig-1.0.0-py27-none-any.wgn +SSHKEYTYPES=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/type_files/sshkeyshare/sshkey_types.yaml +SSHKEYPLUG=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/sshkeyshare-1.0.0-py27-none-any.wgn +OSPLUGINZIP=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/archive/1.4.zip +OSPLUGINWGN=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/releases/download/2.2.0/cloudify_openstack_plugin-2.2.0-py27-none-linux_x86_64-centos-Core.wgn + +PLATBPSRC=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.blueprints/releases/blueprints +DOCKERBP=DockerBP.yaml +CBSBP=config_binding_service.yaml +PGBP=pgaas-onevm.yaml +CDAPBP=cdapbp7.yaml +CDAPBROKERBP=cdap_broker.yaml +INVBP=inventory.yaml +DHBP=DeploymentHandler.yaml +PHBP=policy_handler.yaml +VESBP=ves.yaml +TCABP=tca.yaml +HRULESBP=holmes-rules.yaml +HENGINEBP=holmes-engine.yaml +PRHBP=prh.yaml +HVVESBP=hv-ves.yaml + +DOCKERBPURL="${PLATBPSRC}/${DOCKERBP}" +CBSBPURL="${PLATBPSRC}/${CBSBP}" +PGBPURL="${PLATBPSRC}/${PGBP}" +CDAPBPURL="${PLATBPSRC}/${CDAPBP}" +CDAPBROKERBPURL="${PLATBPSRC}/${CDAPBROKERBP}" +INVBPURL="${PLATBPSRC}/${INVBP}" +DHBPURL="${PLATBPSRC}/${DHBP}" +PHBPURL="${PLATBPSRC}/${PHBP}" +VESBPURL="${PLATBPSRC}/${VESBP}" +TCABPURL="${PLATBPSRC}/${TCABP}" +HRULESBPURL="${PLATBPSRC}/${HRULESBP}" +HENGINEBPURL="${PLATBPSRC}/${HENGINEBP}" +PRHBPURL="${PLATBPSRC}/${PRHBP}" +HVVESBPURL="${PLATBPSRC}/${HVVESBP}" + +LOCATIONID=$(printenv LOCATION) + +# Make sure ssh doesn't prompt for new host or choke on a new host with an IP it's seen before +SSHOPTS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" +STARTDIR=$(pwd) + +# clear out files for writing out floating IP addresses +rm -f "$STARTDIR"/config/runtime.ip.consul +rm -f "$STARTDIR"/config/runtime.ip.cm + + +SSHUSER=centos +PVTKEY=./config/key +INPUTS=./config/inputs.yaml + +if [ "$LOCATION" = "" ] +then + echo 'Environment variable LOCATION not set. Should be set to location ID for this installation.' + exit 1 +fi + +set -e +set -x + +# Docker workaround for SSH key +# In order for the container to be able to access the key when it's mounted from the Docker host, +# the key file has to be world-readable. But ssh itself will not work with a private key that's world readable. +# So we make a copy and change permissions on the copy. +# NB -- the key on the Docker host has to be world-readable, which means that, from the host machine, you +# can't use it with ssh. It needs to be a world-readable COPY. 
+PVTKEY=./key600 +cp ./config/key ${PVTKEY} +chmod 600 ${PVTKEY} + +# Create a virtual environment +virtualenv dcaeinstall +source dcaeinstall/bin/activate + +# forcing pip version (pip>=10.0.0 no longer support use wheel) +pip install pip==9.0.3 + +# Install Cloudify +pip install cloudify==3.4.0 + +# Install the Cloudify OpenStack plugin +wget -qO- ${OSPLUGINZIP} > openstack.zip +pip install openstack.zip + +# Spin up a VM + +# Get the Designate and SSH key type files and plugins +mkdir types +wget -qO- ${DESIGTYPES} > types/dns_types.yaml +wget -qO- ${SSHKEYTYPES} > types/sshkey_types.yaml + +wget -O dnsdesig.wgn ${DESIGPLUG} +wget -O sshkeyshare.wgn ${SSHKEYPLUG} + +wagon install -s dnsdesig.wgn +wagon install -s sshkeyshare.wgn + +## Fix up the inputs file to get the private key locally +sed -e "s#key_filename:.*#key_filename: $PVTKEY#" < ${INPUTS} > /tmp/local_inputs + +# Now install the VM +# Don't exit on error after this point--keep container running so we can do uninstalls after a failure +set +e +if wget -O /tmp/centos_vm.yaml "${PLATBPSRC}"/centos_vm.yaml; then + mv -f /tmp/centos_vm.yaml ./blueprints/ + echo "Succeeded in getting the newest centos_vm.yaml" +else + echo "Failed to update centos_vm.yaml, using default version" + rm -f /tmp/centos_vm.yaml +fi +set -e +cfy local init --install-plugins -p ./blueprints/centos_vm.yaml -i /tmp/local_inputs -i "datacenter=$LOCATION" +cfy local execute -w install --task-retries=10 +PUBIP=$(cfy local outputs | grep -Po '"public_ip": "\K.*?(?=")') + +# wait till the cloudify manager's sshd ready +while ! nc -z -v -w5 ${PUBIP} 22; do echo "."; done +sleep 10 + +echo "Installing Cloudify Manager on ${PUBIP}." +PVTIP=$(ssh $SSHOPTS -i "$PVTKEY" "$SSHUSER"@"$PUBIP" 'echo PVTIP=`curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4`' | grep PVTIP | sed 's/PVTIP=//') +if [ "$PVTIP" = "" ] +then + echo Cannot access specified machine at $PUBIP using supplied credentials + exit +fi + + +# Copy private key onto Cloudify Manager VM +PVTKEYPATH=$(cat ${INPUTS} | grep "key_filename" | cut -d "'" -f2) +PVTKEYNAME=$(basename $PVTKEYPATH) +PVTKEYDIR=$(dirname $PVTKEYPATH) +scp $SSHOPTS -i $PVTKEY $PVTKEY $SSHUSER@$PUBIP:/tmp/$PVTKEYNAME +ssh -t $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mkdir -p $PVTKEYDIR +ssh -t $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/$PVTKEYNAME $PVTKEYPATH + +ESMAGIC=$(uuidgen -r) +WORKDIR=$HOME/cmtmp +BSDIR=$WORKDIR/cmbootstrap +PVTKEY2=$BSDIR/id_rsa.cfybootstrap +TMPBASE=$WORKDIR/tmp +TMPDIR=$TMPBASE/lib +SRCS=$WORKDIR/srcs.tar +TOOL=$WORKDIR/tool.py +rm -rf $WORKDIR +mkdir -p $BSDIR $TMPDIR/cloudify/wheels $TMPDIR/cloudify/sources $TMPDIR/manager +chmod 700 $WORKDIR +cp "$PVTKEY" $PVTKEY2 +cat >$TOOL </root/.virtualenv/virtualenv.ini; echo no-download=true >>/root/.virtualenv/virtualenv.ini"' + +# Gather installation artifacts +# from documentation, URL for manager blueprints archive +BSURL=https://github.com/cloudify-cosmo/cloudify-manager-blueprints/archive/3.4.tar.gz +BSFILE=$(basename $BSURL) + +umask 022 +wget -qO- $BSURL >$BSDIR/$BSFILE +cd $BSDIR +tar xzvf $BSFILE +MRPURL=$(python $TOOL $BSDIR/cloudify-manager-blueprints-3.4) +MRPFILE=$(basename $MRPURL) +wget -qO- $MRPURL >$TMPDIR/cloudify/sources/$MRPFILE + +tar cf $SRCS -C $TMPDIR cloudify +rm -rf $TMPBASE +# +# Load required package files onto VM +# +scp $SSHOPTS -i $PVTKEY2 $SRCS $SSHUSER@$PUBIP:/tmp/. 
+ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "cd /opt; tar xf /tmp/srcs.tar; chown -R root:root /opt/cloudify /opt/manager; rm -rf /tmp/srcs.tar"' +# +# Install config file -- was done by DCAE controller. What now? +# +ssh $SSHOPTS -t -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc '"'"'mkdir -p /opt/dcae; if [ -f /tmp/cfy-config.txt ]; then cp /tmp/cfy-config.txt /opt/dcae/config.txt && chmod 644 /opt/dcae/config.txt; fi'"'" +cd $WORKDIR + +# +# Check for and set up https certificate information +# +rm -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt +ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "openssl pkcs12 -in /opt/app/dcae-certificate/certificate.pkcs12 -passin file:/opt/app/dcae-certificate/.password -nodes -chain"' | awk 'BEGIN{x="/dev/null";}/-----BEGIN CERTIFICATE-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.crt";}/-----BEGIN PRIVATE KEY-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.key";}{print >x;}/-----END /{x="/dev/null";}' +USESSL=false +if [ -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key -a -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt ] +then + USESSL=true +fi +# +# Set up configuration for the bootstrap +# +export CLOUDIFY_USERNAME=admin CLOUDIFY_PASSWORD=encc0fba9f6d618a1a51935b42342b17658 +cd $BSDIR/cloudify-manager-blueprints-3.4 +cp simple-manager-blueprint.yaml bootstrap-blueprint.yaml +ed bootstrap-blueprint.yaml <<'!EOF' +/^node_types:/-1a + plugin_resources: + description: > + Holds any archives that should be uploaded to the manager. + default: [] + dsl_resources: + description: > + Holds a set of dsl required resources + default: [] +. +/^ upload_resources:/a + plugin_resources: { get_input: plugin_resources } +. 
+w +q +!EOF + +sed bootstrap-inputs.yaml \ + -e "s;.*public_ip: .*;public_ip: '$PUBIP';" \ + -e "s;.*private_ip: .*;private_ip: '$PVTIP';" \ + -e "s;.*ssh_user: .*;ssh_user: '$SSHUSER';" \ + -e "s;.*ssh_key_filename: .*;ssh_key_filename: '$PVTKEY2';" \ + -e "s;.*elasticsearch_java_opts: .*;elasticsearch_java_opts: '-Des.cluster.name=$ESMAGIC';" \ + -e "/ssl_enabled: /s/.*/ssl_enabled: $USESSL/" \ + -e "/security_enabled: /s/.*/security_enabled: $USESSL/" \ + -e "/admin_password: /s/.*/admin_password: '$CLOUDIFY_PASSWORD'/" \ + -e "/admin_username: /s/.*/admin_username: '$CLOUDIFY_USERNAME'/" \ + -e "s;.*manager_resources_package: .*;manager_resources_package: 'http://169.254.169.254/nosuchthing/$MRPFILE';" \ + -e "s;.*ignore_bootstrap_validations: .*;ignore_bootstrap_validations: true;" \ + +# Add plugin resources +# TODO Maintain plugin list as updates/additions occur +cat >>bootstrap-inputs.yaml <<'!EOF' +plugin_resources: + - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-openstack-plugin/1.4/cloudify_openstack_plugin-1.4-py27-none-linux_x86_64-centos-Core.wgn' + - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-fabric-plugin/1.4.1/cloudify_fabric_plugin-1.4.1-py27-none-linux_x86_64-centos-Core.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/dnsdesig-1.0.0-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/sshkeyshare-1.0.0-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/pgaas-1.0.0-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/cdapcloudify/cdapcloudify-14.2.5-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/dcaepolicyplugin/dcaepolicyplugin-1.0.0-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/dockerplugin/dockerplugin-2.4.0-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/relationshipplugin/relationshipplugin-1.0.0-py27-none-any.wgn' +!EOF +# +# And away we go +# +cfy init -r +cfy bootstrap --install-plugins -p bootstrap-blueprint.yaml -i bootstrap-inputs.yaml +rm -f resources/ssl/server.key + +# Install Consul VM via a blueprint +cd $STARTDIR +mkdir consul +cd consul +cfy init -r +cfy use -t ${PUBIP} +echo "Deploying Consul VM" + +set +e +if wget -O /tmp/consul_cluster.yaml "${PLATBPSRC}"/consul_cluster.yaml; then + mv -f /tmp/consul_cluster.yaml ../blueprints/ + echo "Succeeded in getting the newest consul_cluster.yaml" +else + echo "Failed to update consul_cluster.yaml, using default version" + rm -f /tmp/consul_cluster.yaml +fi +set -e +cfy install -p ../blueprints/consul_cluster.yaml -d consul -i ../${INPUTS} -i "datacenter=$LOCATION" + +# Get the floating IP for one member of the cluster +# Needed for instructing the Consul agent on CM host to join the cluster +CONSULIP=$(cfy deployments outputs -d consul | grep -Po 'Value: \K.*') +echo Consul deployed at $CONSULIP + +# Wait for Consul API to come up +until curl http://$CONSULIP:8500/v1/agent/services +do + echo Waiting for Consul API + sleep 60 +done + +# Wait for a leader to be elected +until 
[[ "$(curl -Ss http://$CONSULIP:8500/v1/status/leader)" != '""' ]] +do + echo Waiting for leader + sleep 30 +done + +# Instruct the client-mode Consul agent running on the CM to join the cluster +curl http://$PUBIP:8500/v1/agent/join/$CONSULIP + +# Register Cloudify Manager in Consul via the local agent on CM host + +REGREQ=" +{ + \"Name\" : \"cloudify_manager\", + \"ID\" : \"cloudify_manager\", + \"Tags\" : [\"http://${PUBIP}/api/v2.1\"], + \"Address\": \"${PUBIP}\", + \"Port\": 80, + \"Check\" : { + \"Name\" : \"cloudify_manager_health\", + \"Interval\" : \"300s\", + \"HTTP\" : \"http://${PUBIP}/api/v2.1/status\", + \"Status\" : \"passing\", + \"DeregisterCriticalServiceAfter\" : \"30m\" + } +} +" + +curl -X PUT -H 'Content-Type: application/json' --data-binary "$REGREQ" http://$PUBIP:8500/v1/agent/service/register +# Make Consul address available to plugins on Cloudify Manager +# TODO probably not necessary anymore +ENVINI=$(mktemp) +cat < $ENVINI +[$LOCATION] +CONSUL_HOST=$CONSULIP +CONFIG_BINDING_SERVICE=config_binding_service +!EOF +scp $SSHOPTS -i ../$PVTKEY $ENVINI $SSHUSER@$PUBIP:/tmp/env.ini +ssh -t $SSHOPTS -i ../$PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/env.ini /opt/env.ini +rm $ENVINI + + +##### INSTALLATION OF PLATFORM COMPONENTS + +# Get component blueprints +wget -P ./blueprints/docker/ ${DOCKERBPURL} +wget -P ./blueprints/cbs/ ${CBSBPURL} +wget -P ./blueprints/pg/ ${PGBPURL} +wget -P ./blueprints/cdap/ ${CDAPBPURL} +wget -P ./blueprints/cdapbroker/ ${CDAPBROKERBPURL} +wget -P ./blueprints/inv/ ${INVBPURL} +wget -P ./blueprints/dh/ ${DHBPURL} +wget -P ./blueprints/ph/ ${PHBPURL} +wget -P ./blueprints/ves/ ${VESBPURL} +wget -P ./blueprints/tca/ ${TCABPURL} +wget -P ./blueprints/hrules/ ${HRULESBPURL} +wget -P ./blueprints/hengine/ ${HENGINEBPURL} +wget -P ./blueprints/prh/ ${PRHBPURL} +wget -P ./blueprints/hv-ves/ ${HVVESBPURL} + + +# Set up the credentials for access to the Docker registry +curl -X PUT -H "Content-Type: application/json" --data-binary '[{"username":"docker", "password":"docker", "registry": "nexus3.onap.org:10001"}]' http://${CONSULIP}:8500/v1/kv/docker_plugin/docker_logins + +# Install platform Docker host +# Note we're still in the "consul" directory, which is init'ed for talking to CM + +set +e +# Docker host for platform containers +cfy install -v -p ./blueprints/docker/${DOCKERBP} -b DockerBP -d DockerPlatform -i ../${INPUTS} -i "registered_dockerhost_name=platform_dockerhost" -i "registrator_image=onapdcae/registrator:v7" -i "location_id=${LOCATION}" -i "node_name=dokp00" -i "target_datacenter=${LOCATION}" + +# Docker host for service containers +cfy deployments create -b DockerBP -d DockerComponent -i ../${INPUTS} -i "registered_dockerhost_name=component_dockerhost" -i "location_id=${LOCATION}" -i "registrator_image=onapdcae/registrator:v7" -i "node_name=doks00" -i "target_datacenter=${LOCATION}" +cfy executions start -d DockerComponent -w install + +# wait for the extended platform VMs settle +#sleep 180 + + +# CDAP cluster +cfy install -p ./blueprints/cdap/${CDAPBP} -b cdapbp7 -d cdap7 -i ../config/cdapinputs.yaml -i "location_id=${LOCATION}" + +# config binding service +cfy install -p ./blueprints/cbs/${CBSBP} -b config_binding_service -d config_binding_service -i "location_id=${LOCATION}" + + +# Postgres +cfy install -p ./blueprints/pg/${PGBP} -b pgaas -d pgaas -i ../${INPUTS} + + +# Inventory +cfy install -p ./blueprints/inv/${INVBP} -b PlatformServicesInventory -d PlatformServicesInventory -i "location_id=${LOCATION}" -i ../config/invinputs.yaml + + 
+# Deployment Handler DH +cat >../dhinputs < "$STARTDIR"/config/runtime.ip.consul +echo "$PUBIP" > "$STARTDIR"/config/runtime.ip.cm + + +# Keep the container up +rm -f /tmp/ready_to_exit +while [ ! -e /tmp/ready_to_exit ] +do + sleep 30 +done diff --git a/archive/bootstrap/installer-docker.sh-template b/archive/bootstrap/installer-docker.sh-template new file mode 100755 index 0000000..1364dc1 --- /dev/null +++ b/archive/bootstrap/installer-docker.sh-template @@ -0,0 +1,470 @@ +#!/bin/bash +# +# ============LICENSE_START========================================== +# =================================================================== +# Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. +# =================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END============================================ +# + +# URLs for artifacts needed for installation +DESIGTYPES=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/type_files/dnsdesig/dns_types.yaml +DESIGPLUG=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/dnsdesig-1.0.0-py27-none-any.wgn +SSHKEYTYPES=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/type_files/sshkeyshare/sshkey_types.yaml +SSHKEYPLUG=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/sshkeyshare-1.0.0-py27-none-any.wgn +OSPLUGINZIP=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/archive/1.4.zip +OSPLUGINWGN=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/releases/download/2.2.0/cloudify_openstack_plugin-2.2.0-py27-none-linux_x86_64-centos-Core.wgn + +PLATBPSRC=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.blueprints/releases/blueprints +DOCKERBP=DockerBP.yaml +CBSBP=config_binding_service.yaml +PGBP=pgaas-onevm.yaml +CDAPBP=cdapbp7.yaml +CDAPBROKERBP=cdap_broker.yaml +INVBP=inventory.yaml +DHBP=DeploymentHandler.yaml +PHBP=policy_handler.yaml +VESBP=ves.yaml +TCABP=tca.yaml +HRULESBP=holmes-rules.yaml +HENGINEBP=holmes-engine.yaml +PRHBP=prh.yaml +HVVESBP=hv-ves.yaml + +DOCKERBPURL="${PLATBPSRC}/${DOCKERBP}" +CBSBPURL="${PLATBPSRC}/${CBSBP}" +PGBPURL="${PLATBPSRC}/${PGBP}" +CDAPBPURL="${PLATBPSRC}/${CDAPBP}" +CDAPBROKERBPURL="${PLATBPSRC}/${CDAPBROKERBP}" +INVBPURL="${PLATBPSRC}/${INVBP}" +DHBPURL="${PLATBPSRC}/${DHBP}" +PHBPURL="${PLATBPSRC}/${PHBP}" +VESBPURL="${PLATBPSRC}/${VESBP}" +TCABPURL="${PLATBPSRC}/${TCABP}" +HRULESBPURL="${PLATBPSRC}/${HRULESBP}" +HENGINEBPURL="${PLATBPSRC}/${HENGINEBP}" +PRHBPURL="${PLATBPSRC}/${PRHBP}" +HVVESBPURL="${PLATBPSRC}/${HVVESBP}" + +LOCATIONID=$(printenv LOCATION) + +# Make sure ssh doesn't prompt for new host or choke on a new host with an IP it's seen before +SSHOPTS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" +STARTDIR=$(pwd) + +# clear out 
files for writing out floating IP addresses +rm -f "$STARTDIR"/config/runtime.ip.consul +rm -f "$STARTDIR"/config/runtime.ip.cm + + +SSHUSER=centos +PVTKEY=./config/key +INPUTS=./config/inputs.yaml + +if [ "$LOCATION" = "" ] +then + echo 'Environment variable LOCATION not set. Should be set to location ID for this installation.' + exit 1 +fi + +set -e +set -x + +# Docker workaround for SSH key +# In order for the container to be able to access the key when it's mounted from the Docker host, +# the key file has to be world-readable. But ssh itself will not work with a private key that's world readable. +# So we make a copy and change permissions on the copy. +# NB -- the key on the Docker host has to be world-readable, which means that, from the host machine, you +# can't use it with ssh. It needs to be a world-readable COPY. +PVTKEY=./key600 +cp ./config/key ${PVTKEY} +chmod 600 ${PVTKEY} + +# Create a virtual environment +virtualenv dcaeinstall +source dcaeinstall/bin/activate + +# forcing pip version (pip>=10.0.0 no longer support use wheel) +pip install pip==9.0.3 + +# Install Cloudify +pip install cloudify==3.4.0 + +# Install the Cloudify OpenStack plugin +wget -qO- ${OSPLUGINZIP} > openstack.zip +pip install openstack.zip + +# Spin up a VM + +# Get the Designate and SSH key type files and plugins +mkdir types +wget -qO- ${DESIGTYPES} > types/dns_types.yaml +wget -qO- ${SSHKEYTYPES} > types/sshkey_types.yaml + +wget -O dnsdesig.wgn ${DESIGPLUG} +wget -O sshkeyshare.wgn ${SSHKEYPLUG} + +wagon install -s dnsdesig.wgn +wagon install -s sshkeyshare.wgn + +## Fix up the inputs file to get the private key locally +sed -e "s#key_filename:.*#key_filename: $PVTKEY#" < ${INPUTS} > /tmp/local_inputs + +# Now install the VM +# Don't exit on error after this point--keep container running so we can do uninstalls after a failure +set +e +if wget -O /tmp/centos_vm.yaml "${PLATBPSRC}"/centos_vm.yaml; then + mv -f /tmp/centos_vm.yaml ./blueprints/ + echo "Succeeded in getting the newest centos_vm.yaml" +else + echo "Failed to update centos_vm.yaml, using default version" + rm -f /tmp/centos_vm.yaml +fi +set -e +cfy local init --install-plugins -p ./blueprints/centos_vm.yaml -i /tmp/local_inputs -i "datacenter=$LOCATION" +cfy local execute -w install --task-retries=10 +PUBIP=$(cfy local outputs | grep -Po '"public_ip": "\K.*?(?=")') + +# wait till the cloudify manager's sshd ready +while ! nc -z -v -w5 ${PUBIP} 22; do echo "."; done +sleep 10 + +echo "Installing Cloudify Manager on ${PUBIP}." 
+PVTIP=$(ssh $SSHOPTS -i "$PVTKEY" "$SSHUSER"@"$PUBIP" 'echo PVTIP=`curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4`' | grep PVTIP | sed 's/PVTIP=//') +if [ "$PVTIP" = "" ] +then + echo Cannot access specified machine at $PUBIP using supplied credentials + exit +fi + + +# Copy private key onto Cloudify Manager VM +PVTKEYPATH=$(cat ${INPUTS} | grep "key_filename" | cut -d "'" -f2) +PVTKEYNAME=$(basename $PVTKEYPATH) +PVTKEYDIR=$(dirname $PVTKEYPATH) +scp $SSHOPTS -i $PVTKEY $PVTKEY $SSHUSER@$PUBIP:/tmp/$PVTKEYNAME +ssh -t $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mkdir -p $PVTKEYDIR +ssh -t $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/$PVTKEYNAME $PVTKEYPATH + +ESMAGIC=$(uuidgen -r) +WORKDIR=$HOME/cmtmp +BSDIR=$WORKDIR/cmbootstrap +PVTKEY2=$BSDIR/id_rsa.cfybootstrap +TMPBASE=$WORKDIR/tmp +TMPDIR=$TMPBASE/lib +SRCS=$WORKDIR/srcs.tar +TOOL=$WORKDIR/tool.py +rm -rf $WORKDIR +mkdir -p $BSDIR $TMPDIR/cloudify/wheels $TMPDIR/cloudify/sources $TMPDIR/manager +chmod 700 $WORKDIR +cp "$PVTKEY" $PVTKEY2 +cat >$TOOL </root/.virtualenv/virtualenv.ini; echo no-download=true >>/root/.virtualenv/virtualenv.ini"' + +# Gather installation artifacts +# from documentation, URL for manager blueprints archive +BSURL=https://github.com/cloudify-cosmo/cloudify-manager-blueprints/archive/3.4.tar.gz +BSFILE=$(basename $BSURL) + +umask 022 +wget -qO- $BSURL >$BSDIR/$BSFILE +cd $BSDIR +tar xzvf $BSFILE +MRPURL=$(python $TOOL $BSDIR/cloudify-manager-blueprints-3.4) +MRPFILE=$(basename $MRPURL) +wget -qO- $MRPURL >$TMPDIR/cloudify/sources/$MRPFILE + +tar cf $SRCS -C $TMPDIR cloudify +rm -rf $TMPBASE +# +# Load required package files onto VM +# +scp $SSHOPTS -i $PVTKEY2 $SRCS $SSHUSER@$PUBIP:/tmp/. +ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "cd /opt; tar xf /tmp/srcs.tar; chown -R root:root /opt/cloudify /opt/manager; rm -rf /tmp/srcs.tar"' +# +# Install config file -- was done by DCAE controller. What now? +# +ssh $SSHOPTS -t -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc '"'"'mkdir -p /opt/dcae; if [ -f /tmp/cfy-config.txt ]; then cp /tmp/cfy-config.txt /opt/dcae/config.txt && chmod 644 /opt/dcae/config.txt; fi'"'" +cd $WORKDIR + +# +# Check for and set up https certificate information +# +rm -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt +ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "openssl pkcs12 -in /opt/app/dcae-certificate/certificate.pkcs12 -passin file:/opt/app/dcae-certificate/.password -nodes -chain"' | awk 'BEGIN{x="/dev/null";}/-----BEGIN CERTIFICATE-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.crt";}/-----BEGIN PRIVATE KEY-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.key";}{print >x;}/-----END /{x="/dev/null";}' +USESSL=false +if [ -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key -a -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt ] +then + USESSL=true +fi +# +# Set up configuration for the bootstrap +# +export CLOUDIFY_USERNAME=admin CLOUDIFY_PASSWORD=encc0fba9f6d618a1a51935b42342b17658 +cd $BSDIR/cloudify-manager-blueprints-3.4 +cp simple-manager-blueprint.yaml bootstrap-blueprint.yaml +ed bootstrap-blueprint.yaml <<'!EOF' +/^node_types:/-1a + plugin_resources: + description: > + Holds any archives that should be uploaded to the manager. + default: [] + dsl_resources: + description: > + Holds a set of dsl required resources + default: [] +. 
+/^ upload_resources:/a + plugin_resources: { get_input: plugin_resources } +. +w +q +!EOF + +sed bootstrap-inputs.yaml \ + -e "s;.*public_ip: .*;public_ip: '$PUBIP';" \ + -e "s;.*private_ip: .*;private_ip: '$PVTIP';" \ + -e "s;.*ssh_user: .*;ssh_user: '$SSHUSER';" \ + -e "s;.*ssh_key_filename: .*;ssh_key_filename: '$PVTKEY2';" \ + -e "s;.*elasticsearch_java_opts: .*;elasticsearch_java_opts: '-Des.cluster.name=$ESMAGIC';" \ + -e "/ssl_enabled: /s/.*/ssl_enabled: $USESSL/" \ + -e "/security_enabled: /s/.*/security_enabled: $USESSL/" \ + -e "/admin_password: /s/.*/admin_password: '$CLOUDIFY_PASSWORD'/" \ + -e "/admin_username: /s/.*/admin_username: '$CLOUDIFY_USERNAME'/" \ + -e "s;.*manager_resources_package: .*;manager_resources_package: 'http://169.254.169.254/nosuchthing/$MRPFILE';" \ + -e "s;.*ignore_bootstrap_validations: .*;ignore_bootstrap_validations: true;" \ + +# Add plugin resources +# TODO Maintain plugin list as updates/additions occur +cat >>bootstrap-inputs.yaml <<'!EOF' +plugin_resources: + - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-openstack-plugin/1.4/cloudify_openstack_plugin-1.4-py27-none-linux_x86_64-centos-Core.wgn' + - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-fabric-plugin/1.4.1/cloudify_fabric_plugin-1.4.1-py27-none-linux_x86_64-centos-Core.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/dnsdesig-1.0.0-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/sshkeyshare-1.0.0-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/pgaas-1.0.0-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/cdapcloudify/cdapcloudify-14.2.5-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/dcaepolicyplugin/dcaepolicyplugin-1.0.0-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/dockerplugin/dockerplugin-2.4.0-py27-none-any.wgn' + - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/relationshipplugin/relationshipplugin-1.0.0-py27-none-any.wgn' +!EOF +# +# And away we go +# +cfy init -r +cfy bootstrap --install-plugins -p bootstrap-blueprint.yaml -i bootstrap-inputs.yaml +rm -f resources/ssl/server.key + +# Install Consul VM via a blueprint +cd $STARTDIR +mkdir consul +cd consul +cfy init -r +cfy use -t ${PUBIP} +echo "Deploying Consul VM" + +set +e +if wget -O /tmp/consul_cluster.yaml "${PLATBPSRC}"/consul_cluster.yaml; then + mv -f /tmp/consul_cluster.yaml ../blueprints/ + echo "Succeeded in getting the newest consul_cluster.yaml" +else + echo "Failed to update consul_cluster.yaml, using default version" + rm -f /tmp/consul_cluster.yaml +fi +set -e +cfy install -p ../blueprints/consul_cluster.yaml -d consul -i ../${INPUTS} -i "datacenter=$LOCATION" + +# Get the floating IP for one member of the cluster +# Needed for instructing the Consul agent on CM host to join the cluster +CONSULIP=$(cfy deployments outputs -d consul | grep -Po 'Value: \K.*') +echo Consul deployed at $CONSULIP + +# Wait for Consul API to come up +until curl http://$CONSULIP:8500/v1/agent/services +do + echo 
Waiting for Consul API + sleep 60 +done + +# Wait for a leader to be elected +until [[ "$(curl -Ss http://$CONSULIP:8500/v1/status/leader)" != '""' ]] +do + echo Waiting for leader + sleep 30 +done + +# Instruct the client-mode Consul agent running on the CM to join the cluster +curl http://$PUBIP:8500/v1/agent/join/$CONSULIP + +# Register Cloudify Manager in Consul via the local agent on CM host + +REGREQ=" +{ + \"Name\" : \"cloudify_manager\", + \"ID\" : \"cloudify_manager\", + \"Tags\" : [\"http://${PUBIP}/api/v2.1\"], + \"Address\": \"${PUBIP}\", + \"Port\": 80, + \"Check\" : { + \"Name\" : \"cloudify_manager_health\", + \"Interval\" : \"300s\", + \"HTTP\" : \"http://${PUBIP}/api/v2.1/status\", + \"Status\" : \"passing\", + \"DeregisterCriticalServiceAfter\" : \"30m\" + } +} +" + +curl -X PUT -H 'Content-Type: application/json' --data-binary "$REGREQ" http://$PUBIP:8500/v1/agent/service/register +# Make Consul address available to plugins on Cloudify Manager +# TODO probably not necessary anymore +ENVINI=$(mktemp) +cat < $ENVINI +[$LOCATION] +CONSUL_HOST=$CONSULIP +CONFIG_BINDING_SERVICE=config_binding_service +!EOF +scp $SSHOPTS -i ../$PVTKEY $ENVINI $SSHUSER@$PUBIP:/tmp/env.ini +ssh -t $SSHOPTS -i ../$PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/env.ini /opt/env.ini +rm $ENVINI + + +##### INSTALLATION OF PLATFORM COMPONENTS + +# Get component blueprints +wget -P ./blueprints/docker/ ${DOCKERBPURL} +wget -P ./blueprints/cbs/ ${CBSBPURL} +wget -P ./blueprints/pg/ ${PGBPURL} +wget -P ./blueprints/cdap/ ${CDAPBPURL} +wget -P ./blueprints/cdapbroker/ ${CDAPBROKERBPURL} +wget -P ./blueprints/inv/ ${INVBPURL} +wget -P ./blueprints/dh/ ${DHBPURL} +wget -P ./blueprints/ph/ ${PHBPURL} +wget -P ./blueprints/ves/ ${VESBPURL} +wget -P ./blueprints/tca/ ${TCABPURL} +wget -P ./blueprints/hrules/ ${HRULESBPURL} +wget -P ./blueprints/hengine/ ${HENGINEBPURL} +wget -P ./blueprints/prh/ ${PRHBPURL} +wget -P ./blueprints/hv-ves/ ${HVVESBPURL} + + +# Set up the credentials for access to the Docker registry +curl -X PUT -H "Content-Type: application/json" --data-binary '[{"username":"docker", "password":"docker", "registry": "nexus3.onap.org:10001"}]' http://${CONSULIP}:8500/v1/kv/docker_plugin/docker_logins + +# Install platform Docker host +# Note we're still in the "consul" directory, which is init'ed for talking to CM + +set +e +# Docker host for platform containers +cfy install -v -p ./blueprints/docker/${DOCKERBP} -b DockerBP -d DockerPlatform -i ../${INPUTS} -i "registered_dockerhost_name=platform_dockerhost" -i "registrator_image=onapdcae/registrator:v7" -i "location_id=${LOCATION}" -i "node_name=dokp00" -i "target_datacenter=${LOCATION}" + +# Docker host for service containers +cfy deployments create -b DockerBP -d DockerComponent -i ../${INPUTS} -i "registered_dockerhost_name=component_dockerhost" -i "location_id=${LOCATION}" -i "registrator_image=onapdcae/registrator:v7" -i "node_name=doks00" -i "target_datacenter=${LOCATION}" +cfy executions start -d DockerComponent -w install + +# wait for the extended platform VMs settle +#sleep 180 + + +# CDAP cluster +cfy install -p ./blueprints/cdap/${CDAPBP} -b cdapbp7 -d cdap7 -i ../config/cdapinputs.yaml -i "location_id=${LOCATION}" + +# config binding service +cfy install -p ./blueprints/cbs/${CBSBP} -b config_binding_service -d config_binding_service -i "location_id=${LOCATION}" + + +# Postgres +cfy install -p ./blueprints/pg/${PGBP} -b pgaas -d pgaas -i ../${INPUTS} + + +# Inventory +cfy install -p ./blueprints/inv/${INVBP} -b PlatformServicesInventory -d 
PlatformServicesInventory -i "location_id=${LOCATION}" -i ../config/invinputs.yaml + + +# Deployment Handler DH +cat >../dhinputs < "$STARTDIR"/config/runtime.ip.consul +echo "$PUBIP" > "$STARTDIR"/config/runtime.ip.cm + + +# Keep the container up +rm -f /tmp/ready_to_exit +while [ ! -e /tmp/ready_to_exit ] +do + sleep 30 +done diff --git a/archive/bootstrap/pom.xml b/archive/bootstrap/pom.xml new file mode 100644 index 0000000..d2965e9 --- /dev/null +++ b/archive/bootstrap/pom.xml @@ -0,0 +1,173 @@ + + + + 4.0.0 + + org.onap.dcaegen2.deployments + deployments + 1.2.0-SNAPSHOT + + org.onap.dcaegen2.deployments + bootstrap + dcaegen2-deployments-bootstrap + 1.2.0-SNAPSHOT + http://maven.apache.org + + UTF-8 + true + . + + + + + py + Python + **/*.py + + + + + ${project.artifactId}-${project.version} + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + clean phase script + clean + + exec + + + + ${project.artifactId} + clean + + + + + generate-sources script + generate-sources + + exec + + + + ${project.artifactId} + generate-sources + + + + + compile script + compile + + exec + + + + ${project.artifactId} + compile + + + + + package script + package + + exec + + + + ${project.artifactId} + package + + + + + test script + test + + exec + + + + ${project.artifactId} + test + + + + + install script + install + + exec + + + + ${project.artifactId} + install + + + + + deploy script + deploy + + exec + + + + ${project.artifactId} + deploy + + + + + + + + diff --git a/archive/bootstrap/teardown.sh b/archive/bootstrap/teardown.sh new file mode 100755 index 0000000..eb7ed61 --- /dev/null +++ b/archive/bootstrap/teardown.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# +# ============LICENSE_START========================================== +# =================================================================== +# Copyright © 2017 AT&T Intellectual Property. All rights reserved. +# =================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END============================================ +# +# ECOMP and OpenECOMP are trademarks +# and service marks of AT&T Intellectual Property. +# +set -x +set -e + +rm -f /tmp/ready_to_exit + +source ./dcaeinstall/bin/activate +cd ./consul +cfy status +set +e +cfy uninstall -d hengine +cfy uninstall -d hrules +cfy uninstall -d tca +cfy uninstall -d ves +cfy uninstall -d cdapbroker +cfy uninstall -d cdap7 +cfy uninstall -d policy_handler +cfy uninstall -d DeploymentHandler +cfy uninstall -d PlatformServicesInventory +cfy uninstall -d pgaas +cfy uninstall -d config_binding_service +cfy executions start -w uninstall -d DockerComponent +cfy deployments delete -d DockerComponent +cfy uninstall -d DockerPlatform +cfy uninstall -d consul +cd .. 
+cfy local uninstall + +touch /tmp/ready_to_exit diff --git a/archive/cloud_init/cdap-init.sh b/archive/cloud_init/cdap-init.sh new file mode 100644 index 0000000..d9df3ba --- /dev/null +++ b/archive/cloud_init/cdap-init.sh @@ -0,0 +1,387 @@ +# ============LICENSE_START==================================================== +# org.onap.dcae +# ============================================================================= +# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved. +# ============================================================================= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END====================================================== + +set -x +# +# get configuration +# +CODE_SOURCE=$1 +CODE_VERSION=$2 +CLUSTER_INDEX=$3 +CLUSTER_SIZE=$4 +CLUSTER_FQDNS=$5 +CLUSTER_LOCAL_IPS=$6 +CLUSTER_FLOATING_IPS=$7 +DATACENTER=$8 +REGISTERED_NAME=$9 +export JAVA_HOME=/usr/lib/jvm/default-java +md5sum /root/.sshkey/id_rsa | awk '{ print $1 }' >/root/.mysqlpw +chmod 400 /root/.mysqlpw +# +# enable outside apt repositories +# +wget -qO- http://public-repo-1.hortonworks.com/HDP/ubuntu16/2.x/updates/2.6.0.3/hdp.list >/etc/apt/sources.list.d/hdp.list +wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/cask.list >/etc/apt/sources.list.d/cask.list +wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/pubkey.gpg | apt-key add - +apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 B9733A7A07513CAD +apt-get update +# +# install software from apt repositories +# +apt-get install -y default-jdk hadoop-hdfs hadoop-mapreduce hive hbase libsnappy-dev liblzo2-dev hadooplzo spark-master spark-python zip unzip +usermod -a -G hadoop hive +if [ $CLUSTER_INDEX -lt 3 ] +then + apt-get install -y zookeeper-server + cat <>/etc/zookeeper/conf/zookeeper-env.sh +export JAVA_HOME=/usr/lib/jvm/default-java +export ZOOCFGDIR=/etc/zookeeper/conf +export ZOO_LOG_DIR=/var/log/zookeeper +export ZOOPIDFILE=/var/run/zookeeper/zookeeper_server.pid +!EOF + mkdir -p /var/lib/zookeeper + chown zookeeper:zookeeper /var/lib/zookeeper + cp /usr/hdp/current/zookeeper-server/etc/init.d/zookeeper-server /etc/init.d/. 
+ update-rc.d zookeeper-server defaults + service zookeeper-server start +fi +if [ $CLUSTER_INDEX -eq 2 ] +then + debconf-set-selections </usr/hdp/current/spark-client/conf/java-opts +echo "export OPTS=\"\${OPTS} -Dhdp.version=$HDPVER\"" >>/etc/cdap/conf/cdap-env.sh +cat >/etc/profile.d/hadoop.sh <<'!EOF' +HADOOP_PREFIX=/usr/hdp/current/hadoop-client +HADOOP_YARN_HOME=/usr/hdp/current/hadoop-yarn-nodemanager +HADOOP_HOME=/usr/hdp/current/hadoop-client +HADOOP_COMMON_HOME=$HADOOP_HOME +HADOOP_CONF_DIR=/etc/hadoop/conf +HADOOP_HDFS_HOME=/usr/hdp/current/hadoop-hdfs-namenode +HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec +YARN_LOG_DIR=/usr/lib/hadoop-yarn/logs +HADOOP_LOG_DIR=/usr/lib/hadoop/logs +JAVA_HOME=/usr/lib/jvm/default-java +JAVA=$JAVA_HOME/bin/java +PATH=$PATH:$HADOOP_HOME/bin +HBASE_LOG_DIR=/usr/lib/hbase/logs +HADOOP_MAPRED_LOG_DIR=/usr/lib/hadoop-mapreduce/logs +HBASE_CONF_DIR=/etc/hbase/conf +export HADOOP_PREFIX HADOOP_HOME HADOOP_COMMON_HOME HADOOP_CONF_DIR HADOOP_HDFS_HOME JAVA_HOME PATH HADOOP_LIBEXEC_DIR JAVA JARN_LOG_DIR HADOOP_LOG_DIR HBASE_LOG_DIR HADOOP_MAPRED_LOG_DIR HBASE_CONF_DIR +!EOF +chmod 755 /etc/profile.d/hadoop.sh +cat >/etc/hadoop/conf/hadoop-env.sh +mv /root/.sshkey /var/lib/hadoop-hdfs/.ssh +cp /var/lib/hadoop-hdfs/.ssh/id_rsa.pub /var/lib/hadoop-hdfs/.ssh/authorized_keys +>/etc/hadoop/conf/dfs.exclude +>/etc/hadoop/conf/yarn.exclude +chown -R hdfs:hadoop /var/lib/hadoop-hdfs/.ssh /hadoop /usr/lib/hadoop +chown -R yarn:hadoop /usr/lib/hadoop-yarn /hadoop/yarn +chown -R mapred:hadoop /usr/lib/hadoop-mapreduce +chown -R hbase:hbase /usr/lib/hbase +chmod 700 /var/lib/hadoop-hdfs/.ssh +chmod 600 /var/lib/hadoop-hdfs/.ssh/* +sed -i -e '/maxClientCnxns/d' /etc/zookeeper/conf/zoo.cfg + +cat >/tmp/init.py <\n\n" + for n in m.keys(): + a = a + "\n \n {n}\n {v}\n ".format(n=n,v=m[n]) + a = a + "\n\n" + with open(f, 'w') as xml: + xml.write(a) +pxc('/etc/hadoop/conf/core-site.xml', { + 'fs.defaultFS':'hdfs://cl' + }) +pxc('/etc/hadoop/conf/hdfs-site.xml', { + 'dfs.namenode.datanode.registration.ip-hostname-check':'false', + 'dfs.namenode.name.dir':'/hadoop/hdfs/namenode', + 'dfs.hosts.exclude':'/etc/hadoop/conf/dfs.exclude', + 'dfs.datanode.data.dir':'/hadoop/hdfs/data', + 'dfs.journalnode.edits.dir':'/hadoop/hdfs/journalnode', + 'dfs.nameservices':'cl', + 'dfs.ha.namenodes.cl':'nn1,nn2', + 'dfs.namenode.rpc-address.cl.nn1':localips[0]+':8020', + 'dfs.namenode.rpc-address.cl.nn2':localips[1]+':8020', + 'dfs.namenode.http-address.cl.nn1':localips[0]+':50070', + 'dfs.namenode.http-address.cl.nn2':localips[1]+':50070', + 'dfs.namenode.shared.edits.dir':'qjournal://'+localips[0]+':8485;'+localips[1]+':8485;'+localips[2]+':8485/cl', + 'dfs.journalnode.edits.dir':'/hadoop/hdfs/journalnode', + 'dfs.client.failover.proxy.provider.cl':'org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider', + 'dfs.ha.fencing.methods':'sshfence(hdfs),shell(/bin/true)', + 'dfs.ha.fencing.ssh.private-key-files':'/var/lib/hadoop-hdfs/.ssh/id_rsa', + 'dfs.ha.fencing.ssh.connect-timeout':'30000', + 'dfs.ha.automatic-failover.enabled':'true', + 'ha.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181' + }) +pxc('/etc/hadoop/conf/yarn-site.xml', { + 'yarn.nodemanager.vmem-check-enabled':'false', + 
'yarn.application.classpath':'/etc/hadoop/conf,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*', + 'yarn.nodemanager.delete.debug-delay-sec':'43200', + 'yarn.scheduler.minimum-allocation-mb':'512', + 'yarn.scheduler.maximum-allocation-mb':'8192', + 'yarn.nodemanager.local-dirs':'/hadoop/yarn/local', + 'yarn.nodemanager.log-dirs':'/hadoop/yarn/log', + 'yarn.resourcemanager.zk-address':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181', + 'yarn.resourcemanager.ha.enabled':'true', + 'yarn.resourcemanager.ha.rm-ids':'rm1,rm2', + 'yarn.resourcemanager.hostname.rm1':localips[1], + 'yarn.resourcemanager.hostname.rm2':localips[2], + 'yarn.resourcemanager.cluster-id':'cl', + 'yarn.resourcemanager.recovery-enabled':'true', + 'yarn.resourcemanager.store.class':'org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore', + 'yarn.resourcemanager.nodes.exclude-path':'/etc/hadoop/conf/yarn.exclude' + }) +pxc('/etc/hadoop/conf/mapred-site.xml', { + 'mapreduce.application.classpath':'/etc/hadoop/conf,/usr/lib/hadoop/lib/*,/usr/lib/hadoop/*,/usr/hdp/current/hadoop-hdfs-namenode/,/usr/hdp/current/hadoop-hdfs-namenode/lib/*,/usr/hdp/current/hadoop-hdfs-namenode/*,/usr/hdp/current/hadoop-yarn-nodemanager/lib/*,/usr/hdp/current/hadoop-yarn-nodemanager/*,/usr/hdp/current/hadoop-mapreduce-historyserver/lib/*,/usr/hdp/current/hadoop-mapreduce-historyserver/*', + 'mapreduce.jobhistory.intermediate-done-dir':'/mr-history/tmp', + 'mapreduce.jobhistory.done-dir':'/mr-history/done', + 'mapreduce.jobhistory.address':localips[1], + 'mapreduce.jobhistory.webapp.address':localips[1] + }) +pxc('/etc/hbase/conf/hbase-site.xml', { + 'hbase.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181', + 'hbase.rootdir':'hdfs://cl/apps/hbase/data', + 'hbase.cluster.distributed':'true' + }) +pxc('/etc/hive/conf/hive-site.xml', { + 'fs.file.impl.disable.cache':'true', + 'fs.hdfs.impl.disable.cache':'true', + 'hadoop.clientside.fs.operations':'true', + 'hive.auto.convert.join.noconditionaltask.size':'1000000000', + 'hive.auto.convert.sortmerge.join.noconditionaltask':'true', + 'hive.auto.convert.sortmerge.join':'true', + 'hive.enforce.bucketing':'true', + 'hive.enforce.sorting':'true', + 'hive.mapjoin.bucket.cache.size':'10000', + 'hive.mapred.reduce.tasks.speculative.execution':'false', + 'hive.metastore.cache.pinobjtypes':'Table,Database,Type,FieldSchema,Order', + 'hive.metastore.client.socket.timeout':'60s', + 'hive.metastore.local':'true', + 'hive.metastore.uris':'thrift://' + fqdns[2] + ':9083', + 'hive.metastore.warehouse.dir':'/apps/hive/warehouse', + 'hive.optimize.bucketmapjoin.sortedmerge':'true', + 'hive.optimize.bucketmapjoin':'true', + 'hive.optimize.mapjoin.mapreduce':'true', + 'hive.optimize.reducededuplication.min.reducer':'1', + 'hive.security.authorization.manager':'org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider', + 'hive.semantic.analyzer.factory.impl':'org.apache.hivealog.cli.HCatSemanticAnalyzerFactory', + 'javax.jdo.option.ConnectionDriverName':'com.mysql.jdbc.Driver', + 'javax.jdo.option.ConnectionPassword': mysqlpw, + 'javax.jdo.option.ConnectionURL':'jdbc:mysql://localhost:3306/metastore?createDatabaseIfNotExist=true', + 'javax.jdo.option.ConnectionUserName':'root' + }) +if myid == 2: + pxc('/etc/cdap/conf/cdap-site.xml', { + 
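+    # cdap-site.xml is written only on the third node (myid == 2), which
+    # hosts the CDAP router and Kafka broker; \${root.namespace} keeps
+    # CDAP's znodes scoped under the same shared ZooKeeper ensemble.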
'zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181/\${root.namespace}', + 'router.server.address':localips[2], + 'explore.enabled':'true', + 'enable.unrecoverable.reset':'true', + 'kafka.seed.brokers':localips[2] + ':9092', + 'app.program.jvm.opts':'-XX:MaxPermSize=128M \${twill.jvm.gc.opts} -Dhdp.version=$HDPVER -Dspark.yarn.am.extraJavaOptions=-Dhdp.version=$HDPVER' + }) +with open('/etc/hbase/conf/regionservers', 'w') as f: + for ip in localips: + f.write('{ip}\n'.format(ip=ip)) +with open('/etc/hbase/conf/hbase-env.sh', 'a') as f: + f.write("export HBASE_MANAGES_ZK=false\n") +with open('/etc/zookeeper/conf/zoo.cfg', 'a') as f: + f.write("server.1={L1}:2888:3888\nserver.2={L2}:2888:3888\nserver.3={L3}:2888:3888\nmaxClientCnxns=0\nautopurge.purgeInterval=6\n".format(L1=localips[0],L2=localips[1],L3=localips[2])) +with open('/etc/clustermembers', 'w') as f: + f.write("export me={me}\n".format(me=myid)) + for idx in range(len(localips)): + f.write("export n{i}={ip}\n".format(i=idx, ip=localips[idx])) + f.write("export N{i}={ip}\n".format(i=idx, ip=floatingips[idx])) +with open('/etc/hadoop/conf/slaves', 'w') as f: + for idx in range(len(localips)): + if idx != myid: + f.write("{x}\n".format(x=localips[idx])) +if myid < 3: + with open('/var/lib/zookeeper/myid', 'w') as f: + f.write("{id}".format(id=(myid + 1))) + os.system('service zookeeper-server restart') +for ip in localips: + os.system("su - hdfs -c \"ssh -o StrictHostKeyChecking=no -o NumberOfPasswordPrompts=0 {ip} echo Connectivity to {ip} verified\"".format(ip=ip)) +!EOF + +python /tmp/init.py + +. /etc/clustermembers +waitfor() { + while ( ! nc $1 $2 >/var/log/hive/hive.out 2>>/var/log/hive/hive.log /tmp/cinst.sh + bash /tmp/cinst.sh <>/etc/clustermembers +fi diff --git a/archive/cloud_init/instconsulagentub16.sh b/archive/cloud_init/instconsulagentub16.sh new file mode 100644 index 0000000..87c9f92 --- /dev/null +++ b/archive/cloud_init/instconsulagentub16.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# ============LICENSE_START==================================================== +# org.onap.dcae +# ============================================================================= +# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved. +# ============================================================================= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============LICENSE_END====================================================== + +CONSULVER=0.8.3 +CONSULNAME=consul_${CONSULVER}_linux_amd64 +CB=/opt/consul/bin +CD=/opt/consul/data +CF=/opt/consul/config +mkdir -p $CB $CD $CF +cat >$CF/consul.json +cd $CB +wget https://releases.hashicorp.com/consul/${CONSULVER}/${CONSULNAME}.zip +unzip ${CONSULNAME}.zip +rm ${CONSULNAME}.zip +mv consul ${CONSULNAME} +ln -s ${CONSULNAME} consul +cat < /lib/systemd/system/consul.service +[Unit] +Description=Consul +Requires=network-online.target +After=network.target +[Service] +Type=simple +ExecStart=/opt/consul/bin/consul agent -config-dir=/opt/consul/config +ExecReload=/bin/kill -HUP \$MAINPID +[Install] +WantedBy=multi-user.target +EOF +systemctl enable consul +systemctl start consul +until /opt/consul/bin/consul join "dcae-cnsl" +do + echo Waiting to join Consul cluster + sleep 60 +done diff --git a/archive/cloud_init/pom.xml b/archive/cloud_init/pom.xml new file mode 100644 index 0000000..7eb0513 --- /dev/null +++ b/archive/cloud_init/pom.xml @@ -0,0 +1,173 @@ + + + + 4.0.0 + + org.onap.dcaegen2.deployments + deployments + 1.2.0-SNAPSHOT + + org.onap.dcaegen2.deployments + cloud_init + dcaegen2-deployments-cloud_init + 1.1.0-SNAPSHOT + http://maven.apache.org + + UTF-8 + true + . + + + + + py + Python + **/*.py + + + + + ${project.artifactId}-${project.version} + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + clean phase script + clean + + exec + + + + ${project.artifactId} + clean + + + + + generate-sources script + generate-sources + + exec + + + + ${project.artifactId} + generate-sources + + + + + compile script + compile + + exec + + + + ${project.artifactId} + compile + + + + + package script + package + + exec + + + + ${project.artifactId} + package + + + + + test script + test + + exec + + + + ${project.artifactId} + test + + + + + install script + install + + exec + + + + ${project.artifactId} + install + + + + + deploy script + deploy + + exec + + + + ${project.artifactId} + deploy + + + + + + + + diff --git a/archive/heat/build-plugins.sh b/archive/heat/build-plugins.sh new file mode 100755 index 0000000..647ef7a --- /dev/null +++ b/archive/heat/build-plugins.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# ============LICENSE_START======================================================= +# org.onap.dcae +# ================================================================================ +# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +# +# ECOMP is a trademark and service mark of AT&T Intellectual Property. 
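+# Background: a wagon (.wgn) bundles a plugin's source distribution together
+# with pre-built wheels for all of its requirements, so Cloudify Manager can
+# install plugins without reaching PyPI. Built by hand it looks roughly like
+# this (a sketch; assumes the PyPI "wagon" CLI is installed, and "myplugin"
+# is a hypothetical checkout):
+#   pip install wagon
+#   wagon create -t tar.gz -r myplugin/requirements.txt --validate ./myplugin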
+ +# Pull plugin archives from repos +# Build wagons +# $1 is the DCAE repo URL +# $2 is the CCSDK repo URL +# (This script runs at Docker image build time) +# +set -x +DEST=wagons + +# For DCAE, we get zips of the archives and build wagons +DCAEPLUGINFILES=\ +"\ +k8splugin/1.4.3/k8splugin-1.4.3.tgz +relationshipplugin/1.0.0/relationshipplugin-1.0.0.tgz +dcaepolicyplugin/2.3.0/dcaepolicyplugin-2.3.0.tgz +dockerplugin/3.2.0/dockerplugin-3.2.0.tgz \ +" + +# For CCSDK, we pull down the wagon files directly +CCSDKPLUGINFILES=\ +"\ +plugins/pgaas-1.1.0-py27-none-any.wgn +plugins/sshkeyshare-1.0.0-py27-none-any.wgn +" + +# Build a set of wagon files from archives in a repo +# $1 -- repo base URL +# $2 -- list of paths to archive files in the repo +function build { + for plugin in $2 + do + # Could just do wagon create with the archive URL as source, + # but can't use a requirements file with that approach + mkdir work + target=$(basename ${plugin}) + curl -Ss $1/${plugin} > ${target} + tar zxvf ${target} --strip-components=2 -C work + wagon create -t tar.gz -o ${DEST} -r work/requirements.txt --validate ./work + rm -rf work + done +} + +# Copy a set of wagons from a repo +# $1 -- repo baseURL +# $2 -- list of paths to wagons in the repo +function get_wagons { + for wagon in $2 + do + target=$(basename ${wagon}) + curl -Ss $1/${wagon} > ${DEST}/${target} + done +} + +mkdir ${DEST} +build $1 "${DCAEPLUGINFILES}" +get_wagons $2 "${CCSDKPLUGINFILES}" diff --git a/archive/heat/docker-compose-1.yaml b/archive/heat/docker-compose-1.yaml new file mode 100644 index 0000000..3041d6c --- /dev/null +++ b/archive/heat/docker-compose-1.yaml @@ -0,0 +1,82 @@ +version: '2.1' +services: + pgHolmes: + image: "postgres:9.5" + container_name: "pgHolmes" + restart: "always" + hostname: "phHolmes" + environment: + - "POSTGRES_USER=holmes" + - "POSTGRES_PASSWORD=holmespwd" + ports: + - "5432:5432" + labels: + - "SERVICE_5432_NAME=pgHolmes" + - "SERVICE_5432_CHECK_TCP=true" + - "SERVICE_5432_CHECK_INTERVAL=15s" + - "SERVICE_5432_CHECK_INITIAL_STATUS=passing" + + pgInventory: + image: "postgres:9.5" + container_name: "pgInventory" + restart: "always" + hostname: "pgInventory" + environment: + - "POSTGRES_USER=inventory" + - "POSTGRES_PASSWORD=inventorypwd" + ports: + - "5433:5432" + labels: + - "SERVICE_5432_NAME=pgInventory" + - "SERVICE_5432_CHECK_TCP=true" + - "SERVICE_5432_CHECK_INTERVAL=15s" + - "SERVICE_5432_CHECK_INITIAL_STATUS=passing" + + + consul: + image: "consul:0.8.3" + container_name: "consul" + privileged: true + restart: "always" + hostname: "consul" + ports: + - "8500:8500" + - "53:8600/udp" + - "53:8600/tcp" + environment: + - "DOCKER_HOST=tcp://{{ dcae_ip_addr }}:2376" + command: "agent -ui -server -bootstrap-expect 1 -client 0.0.0.0 -log-level trace -recursor {{ dns_ip_addr }}" + labels: + - "SERVICE_8500_NAME=consul" + - "SERVICE_8500_CHECK_HTTP=/v1/agent/services" + - "SERVICE_8500_CHECK_INTERVAL=15s" + - "SERVICE_8500_CHECK_INITIAL_STATUS=passing" + + + config-binding-service: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.configbinding.app-app:{{ dcae_docker_cbs }}" + container_name: "config_binding_service" + restart: "always" + hostname: "config-binding-service" + environment: + - "CONSUL_HOST=consul" + ports: + - "10000:10000" + depends_on: + - "consul" + - "tls-init" + labels: + - "SERVICE_10000_NAME=config_binding_service" + - "SERVICE_10000_CHECK_HTTP=/healthcheck" + - "SERVICE_10000_CHECK_INTERVAL=15s" + - "SERVICE_10000_CHECK_INITIAL_STATUS=passing" + volumes: + - 
"./tls/shared:/opt/tls/shared" + + + tls-init: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.tls-init-container:{{ dcae_docker_tls }}" + container_name: "tls-init" + hostname: "tls-init" + volumes: + - "./tls/shared:/opt/tls/shared" diff --git a/archive/heat/docker-compose-2.yaml b/archive/heat/docker-compose-2.yaml new file mode 100644 index 0000000..dca210e --- /dev/null +++ b/archive/heat/docker-compose-2.yaml @@ -0,0 +1,99 @@ +version: '2.1' +services: + + mvp-dcaegen2-collectors-ves: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.ves.vescollector:{{ dcae_docker_ves }}" + container_name: "mvp-dcaegen2-collectors-ves" + restart: "always" + hostname: "mvp-dcaegen2-collectors-ves" + environment: + - "DMAAPHOST={{ mr_ip_addr }}" + - "CONSUL_HOST=consul" + - "CONSUL_PORT=8500" + - "CONFIG_BINDING_SERVICE=config_binding_service" + - "SERVICE_NAME=mvp-dcaegen2-collectors-ves" + - "HOSTNAME=mvp-dcaegen2-collectors-ves" + ports: + - "8081:8080" + labels: + - "SERVICE_8080_NAME=mvp-dcaegen2-collectors-ves" + - "SERVICE_8080_CHECK_HTTP=/healthcheck" + - "SERVICE_8080_CHECK_INTERVAL=15s" + - "SERVICE_8080_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/tls/shared" + + + mvp-dcaegen2-analytics-tca: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.tca-cdap-container:{{ dcae_docker_tca }}" + container_name: "mvp-dcaegen2-analytics-tca" + restart: "always" + hostname: "mvp-dcaegen2-analytics-tca" + environment: + - "DMAAPHOST={{ mr_ip_addr }}" + - "DMAAPPORT=3904" + - "DMAAPPUBTOPIC=unauthenticated.DCAE_CL_OUTPUT" + - "DMAAPSUBTOPIC=unauthenticated.VES_MEASUREMENT_OUTPUT" + - "AAIHOST={{ aai1_ip_addr }}" + - "AAIPORT=8443" + - "CONSUL_HOST=consul" + - "CONSUL_PORT=8500" + - "CBS_HOST=config-binding-service" + - "CBS_PORT=10000" + - "SERVICE_NAME=mvp-dcaegen2-analytics-tca" + - "HOSTNAME=mvp-dcaegen2-analytics-tca" + - "CONFIG_BINDING_SERVICE=config_binding_service" + # set the parameter below to enable REDIS caching. 
+ #- REDISHOSTPORT=redis-cluster:6379 + ports: + - "11011:11011" + #- "11015:11015" + labels: + - "SERVICE_11011_NAME=mvp-dcaegen2-analytics-tca" + - "SERVICE_11011_CHECK_HTTP=/cdap/ns/cdap_tca_hi_lo" + - "SERVICE_11011_CHECK_INTERVAL=15s" + - "SERVICE_11011_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/tls/shared" + + mvp-dcaegen2-analytics-holmes-engine-management: + image: "{{ nexus_docker_repo }}/onap/holmes/engine-management:{{ holmes_docker_em }}" + container_name: "mvp-dcaegen2-analytics-holmes-engine-management" + restart: "always" + hostname: "mvp-dcaegen2-analytics-holmes-engine-management" + environment: + - "URL_JDBC=pgHolmes:5432" + - "JDBC_USERNAME=holmes" + - "JDBC_PASSWORD=holmespwd" + - "MSB_ADDR={{ msb_ip_addr }}" + - "CONSUL_HOST=consul" + - "CONSUL_PORT=8500" + - "CONFIG_BINDING_SERVICE=config_binding_service" + - "HOSTNAME=mvp-dcaegen2-analytics-holmes-engine-management" + ports: + - "9102:9102" + labels: + - "SERVICE_9102_IGNORE=true" + volumes: + - "./tls/shared:/opt/tls/shared" + + mvp-dcaegen2-analytics-holmes-rule-management: + image: "{{ nexus_docker_repo }}/onap/holmes/rule-management:{{ holmes_docker_rm }}" + container_name: "mvp-dcaegen2-analytics-holmes-rule-management" + restart: "always" + hostname: "mvp-dcaegen2-analytics-holmes-rule-management" + environment: + - "URL_JDBC=pgHolmes:5432" + - "JDBC_USERNAME=holmes" + - "JDBC_PASSWORD=holmespwd" + - "MSB_ADDR={{ msb_ip_addr }}" + - "CONSUL_HOST=consul" + - "CONSUL_PORT=8500" + - "CONFIG_BINDING_SERVICE=config_binding_service" + - "HOSTNAME=mvp-dcaegen2-analytics-holmes-rule-management" + ports: + - "9101:9101" + labels: + - "SERVICE_9101_IGNORE=true" + volumes: + - "./tls/shared:/opt/tls/shared" diff --git a/archive/heat/docker-compose-3.yaml b/archive/heat/docker-compose-3.yaml new file mode 100644 index 0000000..27dbb38 --- /dev/null +++ b/archive/heat/docker-compose-3.yaml @@ -0,0 +1,70 @@ +version: '2.1' +services: + + inventory: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.inventory-api:{{ dcae_docker_inv }}" + restart: "always" + container_name: "inventory" + hostname: "inventory" + environment: + - "POSTGRES_USER=inventory" + - "POSTGRES_PASSWORD=inventorypwd" + ports: + - "8080:8080" + labels: + - "SERVICE_8080_NAME=inventory" + - "SERVICE_8080_CHECK_HTTP=/dcae-service-types" + - "SERVICE_8080_CHECK_INTERVAL=15s" + - "SERVICE_8080_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/tls/shared" + + + service-change-handler: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.servicechange-handler:{{ dcae_docker_sch }}" + container_name: "service-change-handler" + restart: "always" + hostname: "service-change-handler" + ports: + - "8079:8079" + environment: + - "POSTGRES_USER=inventory" + - "POSTGRES_PASSWORD=inventorypwd" + labels: + - "SERVICE_NAME=service_change_handler" + - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/health.sh" + - "SERVICE_CHECK_INTERVAL=15s" + - "SERVICE_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/tls/shared" + + + deployment_handler: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.deployment-handler:{{ dcae_docker_dh }}" + container_name: "deployment-handler" + restart: "always" + hostname: "deployment-handler" + environment: + - "CLOUDIFY_PASSWORD=admin" + - "CLOUDIFY_USER=admin" + ports: + - "8188:8443" + volumes: + - "./tls/shared:/opt/app/dh/etc/cert/" + + + policy_handler: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.policy-handler:{{ dcae_docker_ph }}" + 
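+    # The SERVICE_<port>_* labels used throughout these compose files are
+    # consumed by Registrator (launched in setup.sh), which registers each
+    # container in Consul: _NAME sets the service name, _CHECK_HTTP/_CHECK_TCP
+    # attach a periodic health check, and _IGNORE (the Holmes containers
+    # above) suppresses registration entirely.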
container_name: "policy-handler" + restart: "always" + hostname: "policy-handler" + ports: + - "25577:25577" + labels: + - "SERVICE_25577_NAME=policy_handler" + - "SERVICE_25577_CHECK_HTTP=/healthcheck" + - "SERVICE_25577_CHECK_INTERVAL=15s" + - "SERVICE_25577_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/app/policy_handler/etc/tls/certs/" + diff --git a/archive/heat/docker-compose-4.yaml b/archive/heat/docker-compose-4.yaml new file mode 100644 index 0000000..c13562d --- /dev/null +++ b/archive/heat/docker-compose-4.yaml @@ -0,0 +1,167 @@ +version: '2.1' +services: + snmptrap: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.snmptrap:{{ dcae_docker_snmptrap }}" + container_name: "static-dcaegen2-collectors-snmptrap" + restart: "always" + hostname: "static-dcaegen2-collectors-snmptrap" + environment: + - "DMAAPHOST={{ mr_ip_addr }}" + - "CONSUL_HOST=consul" + - "CONSUL_PORT=8500" + - "CONFIG_BINDING_SERVICE=config_binding_service" + - "SERVICE_NAME=static-dcaegen2-collectors-snmptrap" + - "HOSTNAME=static-dcaegen2-collectors-snmptrap" + - "HOSTALIASES=/etc/host.aliases" + ports: + - "162:6162/udp" + labels: + - "SERVICE_NAME=static-dcaegen2-collectors-snmptrap" + - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/snmptrap/bin/snmptrapd.sh status" + - "SERVICE_CHECK_INTERVAL=300s" + - "SERVICE_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/tls/shared" + + + prh: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.prh.prh-app-server:{{ dcae_docker_prh }}" + container_name: "static-dcaegen2-services-prh" + restart: "always" + hostname: "static-dcaegen2-services-prh" + environment: + - "DMAAPHOST={{ mr_ip_addr }}" + - "CONSUL_HOST=consul" + - "CONSUL_PORT=8500" + - "CONFIG_BINDING_SERVICE=config_binding_service" + - "SERVICE_NAME=static-dcaegen2-services-prh" + - "HOSTNAME=static-dcaegen2-services-prh" + - "HOSTALIASES=/etc/host.aliases" + ports: + - "8082:8080" + labels: + - "SERVICE_8082_NAME=static-dcaegen2-services-prh" + - "SERVICE_8082_CHECK_HTTP=/heartbeat" + - "SERVICE_8082_CHECK_INTERVAL=15s" + - "SERVICE_8082_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/tls/shared" + + + hvves: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:{{ dcae_docker_hvves }}" + container_name: "static-dcaegen2-collectors-hvves" + restart: "always" + hostname: "static-dcaegen2-collectors-hvves" + environment: + - "DMAAPHOST={{ mr_ip_addr }}" + - "CONSUL_HOST=consul" + - "CONSUL_PORT=8500" + - "CONFIG_BINDING_SERVICE=config_binding_service" + - "SERVICE_NAME=static-dcaegen2-collectors-hvves" + - "HOSTNAME=static-dcaegen2-collectors-hvves" + - "HOSTALIASES=/etc/host.aliases" + ports: + - "6061:6061" + labels: + - "SERVICE_NAME=static-dcaegen2-collectors-hvves" + - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/hvves/bin/healthcheck.sh" + - "SERVICE_CHECK_INTERVAL=15s" + - "SERVICE_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/tls/shared" + + + datafile: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.datafile.datafile-app-server:{{ dcae_docker_datafile }}" + container_name: "static-dcaegen2-collectors-datafile" + restart: "always" + hostname: "static-dcaegen2-collectors-datafile" + environment: + - "DMAAPHOST={{ mr_ip_addr }}" + - "CONSUL_HOST=consul" + - "CONSUL_PORT=8500" + - "CONFIG_BINDING_SERVICE=config_binding_service" + - "SERVICE_NAME=static-dcaegen2-collectors-datafile" + - "HOSTNAME=static-dcaegen2-collectors-datafile" + - "HOSTALIASES=/etc/host.aliases" + 
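+    # No HTTP check here: containers without a usable HTTP endpoint declare
+    # SERVICE_CHECK_DOCKER_SCRIPT, which apparently has Consul exec the named
+    # script inside the container and use its exit status as the health
+    # result. (The mapper and heartbeat services below reuse the datafile
+    # healthcheck path verbatim, which looks like an upstream copy/paste slip.)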
labels: + - "SERVICE_NAME=static-dcaegen2-collectors-datafile" + - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh" + - "SERVICE_CHECK_INTERVAL=15s" + - "SERVICE_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/tls/shared" + + mapper-universalvesadaptor: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.universalvesadaptor:{{ dcae_docker_mua }}" + container_name: "static-dcaegen2-services-mua" + restart: "always" + hostname: "static-dcaegen2-services-mua" + environment: + - "DMAAPHOST={{ mr_ip_addr }}" + - "CONSUL_HOST=consul" + - "CONSUL_PORT=8500" + - "CONFIG_BINDING_SERVICE=config_binding_service" + - "SERVICE_NAME=static-dcaegen2-services-mua" + - "HOSTNAME=static-dcaegen2-services-mua" + - "HOSTALIASES=/etc/host.aliases" + - "MR_DEFAULT_PORT_NUMBER=3904" + - "URL_JDBC=jdbc:postgresql://{{dcae_ip_addr}}:5433/inventory" + - "JDBC_USERNAME=inventory" + - "JDBC_PASSWORD=inventorypwd" + labels: + - "SERVICE_NAME=static-dcaegen2-services-mua" + - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh" + - "SERVICE_CHECK_INTERVAL=15s" + - "SERVICE_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/tls/shared" + + mapper-snmp: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.snmpmapper:{{ dcae_docker_msnmp }}" + container_name: "static-dcaegen2-services-msnmp" + restart: "always" + hostname: "static-dcaegen2-services-msnmp" + environment: + - "DMAAPHOST={{ mr_ip_addr }}" + - "CONSUL_HOST=consul" + - "CONSUL_PORT=8500" + - "CONFIG_BINDING_SERVICE=config_binding_service" + - "SERVICE_NAME=static-dcaegen2-services-msnmp" + - "HOSTNAME=static-dcaegen2-services-msnmp" + - "HOSTALIASES=/etc/host.aliases" + - "URL_JDBC=jdbc:postgresql://{{dcae_ip_addr}}:5433/inventory" + - "JDBC_USERNAME=inventory" + - "JDBC_PASSWORD=inventorypwd" + labels: + - "SERVICE_NAME=static-dcaegen2-services-msnmp" + - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh" + - "SERVICE_CHECK_INTERVAL=15s" + - "SERVICE_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/tls/shared" + + + heartbeat: + image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.heartbeat:{{ dcae_docker_heartbeat }}" + container_name: "static-dcaegen2-services-heartbeat" + restart: "always" + hostname: "static-dcaegen2-services-heartbeat" + environment: + - "DMAAPHOST={{ mr_ip_addr }}" + - "CONSUL_HOST=consul" + - "CONSUL_PORT=8500" + - "CONFIG_BINDING_SERVICE=config_binding_service" + - "SERVICE_NAME=static-dcaegen2-services-heartbeat" + - "HOSTNAME=static-dcaegen2-services-heartbeat" + - "HOSTALIASES=/etc/host.aliases" + labels: + - "SERVICE_NAME=static-dcaegen2-services-heartbeat" + - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh" + - "SERVICE_CHECK_INTERVAL=15s" + - "SERVICE_CHECK_INITIAL_STATUS=passing" + volumes: + - "./tls/shared:/opt/tls/shared" + + diff --git a/archive/heat/pom.xml b/archive/heat/pom.xml new file mode 100644 index 0000000..e21db72 --- /dev/null +++ b/archive/heat/pom.xml @@ -0,0 +1,158 @@ + + + + 4.0.0 + + org.onap.dcaegen2.deployments + deployments + 1.2.0-SNAPSHOT + + org.onap.dcaegen2.deployments + heat + dcaegen2-deployments-heat + 1.0.0-SNAPSHOT + http://maven.apache.org + + UTF-8 + true + + + ${project.artifactId}-${project.version} + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + clean phase script + clean + + exec + + + + ${project.artifactId} + clean + + + + + generate-sources script + generate-sources + + exec + + + + 
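+ <!-- Each lifecycle phase appears to be delegated to an external build
+ script via exec-maven-plugin, passing the module artifactId and the phase
+ name as arguments (presumably ONAP's usual mvn-phase-script.sh
+ arrangement); the pom produces no artifacts itself. -->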
${project.artifactId} + generate-sources + + + + + compile script + compile + + exec + + + + ${project.artifactId} + compile + + + + + package script + package + + exec + + + + ${project.artifactId} + package + + + + + test script + test + + exec + + + + ${project.artifactId} + test + + + + + install script + install + + exec + + + + ${project.artifactId} + install + + + + + deploy script + deploy + + exec + + + + ${project.artifactId} + deploy + + + + + + + + diff --git a/archive/heat/pullall.sh b/archive/heat/pullall.sh new file mode 100755 index 0000000..42ee1ad --- /dev/null +++ b/archive/heat/pullall.sh @@ -0,0 +1,40 @@ +#!/bin/bash +############################################################################# +# +# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +############################################################################# + +docker login {{ nexus_docker_repo }} -u {{ nexus_username }} -p {{ nexus_password }} + +docker pull postgres:9.5 +docker pull consul:0.8.3 +docker pull nginx:latest +docker pull onapdcae/registrator:v7 +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.configbinding.app-app:{{ dcae_docker_cbs }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.ves.vescollector:{{ dcae_docker_ves }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.tca-cdap-container:{{ dcae_docker_tca }} +docker pull {{ nexus_docker_repo }}/onap/holmes/engine-management:{{ holmes_docker_em }} +docker pull {{ nexus_docker_repo }}/onap/holmes/rule-management:{{ holmes_docker_rm }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.inventory-api:{{ dcae_docker_inv }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.servicechange-handler:{{ dcae_docker_sch }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.deployment-handler:{{ dcae_docker_dh }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.policy-handler:{{ dcae_docker_ph }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.snmptrap:{{ dcae_docker_snmptrap }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.prh.prh-app-server:{{ dcae_docker_prh }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:{{ dcae_docker_hvves }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.datafile.datafile-app-server:{{ dcae_docker_datafile }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.universalvesadaptor:{{ dcae_docker_mua }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.snmpmapper:{{ dcae_docker_msnmp }} +docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.heartbeat:{{ dcae_docker_heartbeat }} diff --git a/archive/heat/register.sh b/archive/heat/register.sh new file mode 100755 index 0000000..34c1505 --- /dev/null +++ 
b/archive/heat/register.sh @@ -0,0 +1,605 @@ +#!/bin/bash + +############################################################################# +# +# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +############################################################################# + + + +# We now register services that are not handled by Registrator +# minimum platform components +HOSTNAME_CONSUL="consul" +SRVCNAME_CONSUL="consul" +HOSTNAME_CM="cloudify-manager" +SRVCNAME_CM="cloudify_manager" +HOSTNAME_CBS="config-binding-service" +SRVCNAME_CBS="config_binding_service" + +# R3 MVP service components +HOSTNAME_MVP_VES="mvp-dcaegen2-collectors-ves" +SRVCNAME_MVP_VES="mvp-dcaegen2-collectors-ves" +HOSTNAME_MVP_TCA="mvp-dcaegen2-analytics-tca" +SRVCNAME_MVP_TCA="mvp-dcaegen2-analytics-tca" +HOSTNAME_MVP_HR="mvp-dcaegen2-analytics-holmes-rule-management" +SRVCNAME_MVP_HR="mvp-dcaegen2-analytics-holmes-rule-management" +HOSTNAME_MVP_HE="mvp-dcaegen2-analytics-holmes-engine-management" +SRVCNAME_MVP_HE="mvp-dcaegen2-analytics-holmes-engine-management" + +# R3 PLUS service components +HOSTNAME_STATIC_SNMPTRAP="static-dcaegen2-collectors-snmptrap" +SRVCNAME_STATIC_SNMPTRAP="static-dcaegen2-collectors-snmptrap" +HOSTNAME_STATIC_MAPPER="static-dcaegen2-services-mapper" +SRVCNAME_STATIC_MAPPER="static-dcaegen2-services-mapper" +HOSTNAME_STATIC_HEARTBEAT="static-dcaegen2-services-heartbeat" +SRVCNAME_STATIC_HEARTBEAT="static-dcaegen2-services-heartbeat" +HOSTNAME_STATIC_PRH="static-dcaegen2-services-prh" +SRVCNAME_STATIC_PRH="static-dcaegen2-services-prh" +HOSTNAME_STATIC_HVVES="static-dcaegen2-collectors-hvves" +SRVCNAME_STATIC_HVVES="static-dcaegen2-collectors-hvves" +HOSTNAME_STATIC_DFC="static-dcaegen2-collectors-datafile" +SRVCNAME_STATIC_DFC="static-dcaegen2-collectors-datafile" + + +# registering docker host +SVC_NAME="dockerhost" +SVC_IP="$(cat /opt/config/dcae_float_ip.txt)" +REGREQ=" +{ + \"Name\" : \"${SVC_NAME}\", + \"ID\" : \"${SVC_NAME}\", + \"Address\": \"${SVC_IP}\", + \"Port\": 2376, + \"Check\" : { + \"Name\" : \"${SVC_NAME}_health\", + \"Interval\" : \"15s\", + \"HTTP\" : \"http://${SVC_IP}:2376/containers/registrator/json\", + \"Status\" : \"passing\" + } +} +" +curl -v -X PUT -H 'Content-Type: application/json' \ +--data-binary "$REGREQ" \ +"http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register" + +#Add KV for dockerplugin login +REGREQ=" +[ + { + \"username\": \"docker\", + \"password\": \"docker\", + \"registry\": \"nexus3.onap.org:10001\" + } +] +" +curl -v -X PUT -H 'Content-Type: application/json' \ +--data-binary "$REGREQ" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/docker_plugin/docker_logins" + + +# registering deployment handler +SVC_NAME="deployment_handler" +SVC_IP="$(cat /opt/config/dcae_ip_addr.txt)" +REGREQ=" +{ + \"Name\" : \"${SVC_NAME}\", + \"ID\" : \"${SVC_NAME}\", + \"Address\": \"${SVC_IP}\", + \"Port\": 8188, + \"Check\" : { + \"Name\" : \"${SVC_NAME}_health\", + \"Interval\" : \"15s\", + \"HTTP\" : 
\"https://${SVC_IP}:8188/\", + \"tls_skip_verify\": true, + \"Status\" : \"passing\" + } +} +" +curl -v -X PUT -H 'Content-Type: application/json' \ +--data-binary \ +"$REGREQ" "http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register" + + +# registering Holmes services +SVC_NAME="${SRVCNAME_MVP_HR}" +SVC_IP="$(cat /opt/config/dcae_ip_addr.txt)" +REGREQ=" +{ + \"Name\" : \"${SVC_NAME}\", + \"ID\" : \"${SVC_NAME}\", + \"Address\": \"${SVC_IP}\", + \"Port\": 9101, + \"Check\" : { + \"Name\" : \"${SVC_NAME}_health\", + \"Interval\" : \"15s\", + \"HTTP\" : \"https://${SVC_IP}:9101/api/holmes-rule-mgmt/v1/healthcheck\", + \"tls_skip_verify\": true, + \"Status\" : \"passing\" + } +} +" +curl -v -X PUT -H 'Content-Type: application/json' \ +--data-binary \ +"$REGREQ" "http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register" + + +SVC_NAME="${SRVCNAME_MVP_HE}" +SVC_IP="$(cat /opt/config/dcae_ip_addr.txt)" +REGREQ=" +{ + \"Name\" : \"${SVC_NAME}\", + \"ID\" : \"${SVC_NAME}\", + \"Address\": \"${SVC_IP}\", + \"Port\": 9102, + \"Check\" : { + \"Name\" : \"${SVC_NAME}_health\", + \"Interval\" : \"15s\", + \"HTTP\" : \"https://${SVC_IP}:9102/api/holmes-engine-mgmt/v1/healthcheck\", + \"tls_skip_verify\": true, + \"Status\" : \"passing\" + } +} +" +curl -v -X PUT -H 'Content-Type: application/json' \ +--data-binary "$REGREQ" \ +"http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register" + + + +# now push KVs +# generated with https://www.browserling.com/tools/json-escape +# config binding service +REGKV=" +{} +" +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +http://${HOSTNAME_CONSUL}:8500/v1/kv/config_binding_service +# checked + + + +# inventory +REGKV=' +{ + "database": { + "checkConnectionWhileIdle": false, + "driverClass": "org.postgresql.Driver", + "evictionInterval": "10s", + "initialSize": 2, + "maxSize": 8, + "maxWaitForConnection": "1s", + "minIdleTime": "1 minute", + "minSize": 2, + "password": "inventorypwd", + "properties": { + "charSet": "UTF-8"}, + "url": "jdbc:postgresql://pgInventory:5432/postgres", + "user": "inventory", + "validationQuery": "/* MyService Health Check */ SELECT 1" + }, + "databusControllerConnection": { + "host": "databus-controller-hostname", + "mechId": null, + "password": null, + "port": 8443, + "required": false}, + "httpClient": { + "connectionTimeout": "5000milliseconds", + "gzipEnabled": false, + "gzipEnabledForRequests": false, + "maxThreads": 128, + "minThreads": 1, + "timeout": "5000milliseconds" + } + } + } +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +http://${HOSTNAME_CONSUL}:8500/v1/kv/inventory +# checked + + +# policy handler +REGKV=' +{ + "policy_handler": { + "deploy_handler": { + "target_entity": "deployment_handler", + "tls_ca_mode": "do_not_verify", + "max_msg_length_mb": 5, + "url" : "https://{{ dcae_ip_addr }}:8188", + "tls_ca_mode" : "cert_directory", + "query": { + "cfy_tenant_name": "default_tenant" + } + }, + "thread_pool_size": 4, + "policy_retry_count": 5, + "pool_connections": 20, + "policy_retry_sleep": 5, + "catch_up": { + "interval": 1200 + }, + "reconfigure": { + "interval": 600 + }, + "policy_engine": { + "path_decision": "/decision/v1", + "path_api": "/pdp/api/", + "path_notifications" : "/pdp/notifications", + "tls_ca_mode" : "cert_directory", + "tls_wss_ca_mode" : "cert_directory", + "headers": { + "Environment": "TEST", + "ClientAuth": "cHl0aG9uOnRlc3Q=", + "Content-Type": "application/json", + "Accept": "application/json", + "Authorization": "Basic 
dGVzdHBkcDphbHBoYTEyMw==" + }, + "url": "https://{{ policy_ip_addr }}:8081", + "target_entity": "policy_engine" + } + } +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/policy_handler" + + +# service change handler +REGKV=' +{ + "asdcDistributionClient": { + "asdcAddress": "{{ sdc_ip_addr }}:8443", + "asdcUri": "https://{{ sdc_ip_addr }}:8443", + "msgBusAddress": "{{ mr_ip_addr }}", + "user": "dcae", + "password": "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U", + "pollingInterval": 20, + "pollingTimeout": 20, + "consumerGroup": "dcae", + "consumerId": "dcae-sch", + "environmentName": "AUTO", + "keyStorePath": null, + "keyStorePassword": null, + "activateServerTLSAuth": false, + "useHttpsWithDmaap": false, + "isFilterInEmptyResources": false + }, + "dcaeInventoryClient": { + "uri": "http://inventory:8080" + } +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/service-change-handler" + + +# deployment handler +REGKV=' +{ + "logLevel": "DEBUG", + "cloudify": { + "protocol": "http" + }, + "inventory": { + "protocol": "http" + } +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/deployment_handler" + + +# ves +MR_IP="$(cat /opt/config/mr_ip_addr.txt)" +REGKV=' +{ + "event.transform.flag": "0", + "tomcat.maxthreads": "200", + "collector.schema.checkflag": "1", + "collector.dmaap.streamid": "fault=ves_fault|syslog=ves_syslog|heartbeat=ves_heartbeat|measurementsForVfScaling=ves_measurement|mobileFlow=ves_mobileflow|other=ves_other|stateChange=ves_statechange|thresholdCrossingAlert=ves_thresholdCrossingAlert|voiceQuality=ves_voicequality|sipSignaling=ves_sipsignaling", + "collector.service.port": "8080", + "collector.schema.file": "{\"v1\":\"./etc/CommonEventFormat_27.2.json\",\"v2\":\"./etc/CommonEventFormat_27.2.json\",\"v3\":\"./etc/CommonEventFormat_27.2.json\",\"v4\":\"./etc/CommonEventFormat_27.2.json\",\"v5\":\"./etc/CommonEventFormat_28.4.1.json\"}", + "collector.keystore.passwordfile": "/opt/app/VESCollector/etc/passwordfile", + "collector.inputQueue.maxPending": "8096", + "streams_publishes": { + "ves_measurement": { + "type": "message_router", + "dmaap_info": { + "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.VES_MEASUREMENT_OUTPUT/" + } + }, + "ves_fault": { + "type": "message_router", + "dmaap_info": { + "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.SEC_FAULT_OUTPUT/" + } + } + }, + "collector.service.secure.port": "8443", + "header.authflag": "0", + "collector.keystore.file.location": "/opt/app/VESCollector/etc/keystore", + "collector.keystore.alias": "dynamically generated", + "services_calls": [], + "header.authlist": "userid1,base64encodepwd1|userid2,base64encodepwd2" +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcaegen2-collectors-ves" + + +# holmes rule management +MSB_IP="$(cat /opt/config/msb_ip_addr.txt)" +REGKV=" +{ + \"streams_subscribes\": {}, + \"msb.hostname\": \"${MSB_IP_ADDR}\", + \"msb.uri\": \"/api/microservices/v1/services\", + \"streams_publishes\": {}, + \"holmes.default.rule.volte.scenario1\": \"ControlLoop-VOLTE-2179b738-fd36-4843-a71a-a8c24c70c55b\$\$\$package org.onap.holmes.droolsRule;\\n\\nimport org.onap.holmes.common.dmaap.DmaapService;\\nimport org.onap.holmes.common.api.stat.VesAlarm;\\nimport 
org.onap.holmes.common.aai.CorrelationUtil;\\nimport org.onap.holmes.common.dmaap.entity.PolicyMsg;\\nimport org.onap.holmes.common.dropwizard.ioc.utils.ServiceLocatorHolder;\\nimport org.onap.holmes.common.utils.DroolsLog;\\n \\n\\nrule \\\"Relation_analysis_Rule\\\"\\nsalience 200\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 0,\\n \$sourceId: sourceId, sourceId != null && !sourceId.equals(\\\"\\\"),\\n\\t\\t\\t\$sourceName: sourceName, sourceName \!= null \&\& \!sourceName.equals(\\\"\\\"),\\n\\t\\t\\t\$startEpochMicrosec: startEpochMicrosec,\\n eventName in (\\\"Fault_MultiCloud_VMFailure\\\"),\\n \$eventId: eventId)\\n \$child : VesAlarm( eventId \!= $eventId, parentId == null,\\n CorrelationUtil.getInstance().isTopologicallyRelated(sourceId, \$sourceId, \$sourceName),\\n eventName in (\\\"Fault_MME_eNodeB out of service alarm\\\"),\\n startEpochMicrosec \< \$startEpochMicrosec + 60000 \&\& startEpochMicrosec \> \$startEpochMicrosec - 60000 )\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"Relation_analysis_Rule: rootId=\\\" + \$root.getEventId() + \\\", childId=\\\" + \$child.getEventId());\\n\\t\\t\$child.setParentId(\$root.getEventId());\\n\\t\\tupdate(\$child);\\n\\t\\t\\nend\\n\\nrule \\\"root_has_child_handle_Rule\\\"\\nsalience 150\\nno-loop true\\n\\twhen\\n\\t\\t\$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0, \$eventId: eventId)\\n\\t\\t\$child : VesAlarm(eventId \!= $eventId, parentId == $eventId)\\n\\tthen\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_has_child_handle_Rule: rootId=\\\" + \$root.getEventId() + \\\", childId=\\\" + $child.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, \$child, \\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\t\$root.setRootFlag(1);\\n\\t\\tupdate(\$root);\\nend\\n\\nrule \\\"root_no_child_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0,\\n sourceId \!= null \&\& \!sourceId.equals(\\\"\\\"),\\n\\t\\t\\tsourceName \!= null \&\& \!sourceName.equals(\\\"\\\"),\\n eventName in (\\\"Fault_MultiCloud_VMFailure\\\"))\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_no_child_handle_Rule: rootId=\\\" + \$root.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, null, \\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\t$root.setRootFlag(1);\\n\\t\\tupdate(\$root);\\nend\\n\\nrule \\\"root_cleared_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 1, rootFlag == 1)\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_cleared_handle_Rule: rootId=\\\" + \$root.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, null, 
\\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\tretract(\$root);\\nend\\n\\nrule \\\"child_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$child : VesAlarm(alarmIsCleared == 1, rootFlag == 0)\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"child_handle_Rule: childId=\\\" + \$child.getEventId());\\n\\t\\tretract(\$child);\\nend\", + \"services_calls\": {} +}" + + + +REGKV=' +{ + "streams_subscribes": {}, + "msb.hostname": "{{ msb_ip_addr }}", + "msb.uri": "/api/microservices/v1/services", + "streams_publishes": {}, + "holmes.default.rule.volte.scenario1": "ControlLoop-VOLTE-2179b738-fd36-4843-a71a-a8c24c70c55b$$$package org.onap.holmes.droolsRule;\n\nimport org.onap.holmes.common.dmaap.DmaapService;\nimport org.onap.holmes.common.api.stat.VesAlarm;\nimport org.onap.holmes.common.aai.CorrelationUtil;\nimport org.onap.holmes.common.dmaap.entity.PolicyMsg;\nimport org.onap.holmes.common.dropwizard.ioc.utils.ServiceLocatorHolder;\nimport org.onap.holmes.common.utils.DroolsLog;\n \n\nrule \"Relation_analysis_Rule\"\nsalience 200\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 0,\n $sourceId: sourceId, sourceId != null && !sourceId.equals(\"\"),\n\t\t\t$sourceName: sourceName, sourceName != null && !sourceName.equals(\"\"),\n\t\t\t$startEpochMicrosec: startEpochMicrosec,\n eventName in (\"Fault_MultiCloud_VMFailure\"),\n $eventId: eventId)\n $child : VesAlarm( eventId != $eventId, parentId == null,\n CorrelationUtil.getInstance().isTopologicallyRelated(sourceId, $sourceId, $sourceName),\n eventName in (\"Fault_MME_eNodeB out of service alarm\"),\n startEpochMicrosec < $startEpochMicrosec + 60000 && startEpochMicrosec > $startEpochMicrosec - 60000 )\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"Relation_analysis_Rule: rootId=\" + $root.getEventId() + \", childId=\" + $child.getEventId());\n\t\t$child.setParentId($root.getEventId());\n\t\tupdate($child);\n\t\t\nend\n\nrule \"root_has_child_handle_Rule\"\nsalience 150\nno-loop true\n\twhen\n\t\t$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0, $eventId: eventId)\n\t\t$child : VesAlarm(eventId != $eventId, parentId == $eventId)\n\tthen\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_has_child_handle_Rule: rootId=\" + $root.getEventId() + \", childId=\" + $child.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = dmaapService.getPolicyMsg($root, $child, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\t$root.setRootFlag(1);\n\t\tupdate($root);\nend\n\nrule \"root_no_child_handle_Rule\"\nsalience 100\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 0, rootFlag == 0,\n sourceId != null && !sourceId.equals(\"\"),\n\t\t\tsourceName != null && !sourceName.equals(\"\"),\n eventName in (\"Fault_MultiCloud_VMFailure\"))\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_no_child_handle_Rule: rootId=\" + $root.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = 
dmaapService.getPolicyMsg($root, null, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\t$root.setRootFlag(1);\n\t\tupdate($root);\nend\n\nrule \"root_cleared_handle_Rule\"\nsalience 100\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 1, rootFlag == 1)\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_cleared_handle_Rule: rootId=\" + $root.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = dmaapService.getPolicyMsg($root, null, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\tretract($root);\nend\n\nrule \"child_handle_Rule\"\nsalience 100\nno-loop true\n when\n $child : VesAlarm(alarmIsCleared == 1, rootFlag == 0)\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"child_handle_Rule: childId=\" + $child.getEventId());\n\t\tretract($child);\nend", + "services_calls": {} +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcae-analytics-holmes-rule-management" + + + +# Holmes engine management +REGKV=' +{ + "msb.hostname": "10.0.14.1", + "services_calls": {}, + "msb.uri": "/api/microservices/v1/services", + "streams_publishes": { + "dcae_cl_out": { + "type": "message_router", + "dmaap_info": { + "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.DCAE_CL_OUTPUT" + } + } + }, + "streams_subscribes": { + "ves_fault": { + "type": "message_router", + "dmaap_info": { + "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.SEC_FAULT_OUTPUT" + } + } + } +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcae-analytics-holmes-engine-management" + + +#curl http://localhost:8500/v1/kv/config_binding_service |jq .[0].Value |sed -e 's/\"//g' |base64 --decode + + + +# TCA +REGKV=' +{ + "thresholdCalculatorFlowletInstances": "2", + "tcaVESMessageStatusTableTTLSeconds": "86400", + "tcaVESMessageStatusTableName": "TCAVESMessageStatusTable", + "tcaVESAlertsTableTTLSeconds": "1728000", + "tcaVESAlertsTableName": "TCAVESAlertsTable", + "tcaSubscriberOutputStreamName": "TCASubscriberOutputStream", + "tcaAlertsAbatementTableTTLSeconds": "1728000", + "tcaAlertsAbatementTableName": "TCAAlertsAbatementTable", + "streams_subscribes": {}, + "streams_publishes": {}, + "services_calls": {}, + "appName": "dcae-tca", + "appDescription": "DCAE Analytics Threshold Crossing Alert Application" +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcaegen2-analytics-tca" + + +# TCA pref +REGKV='{ + "tca_policy": 
"{\"domain\":\"measurementsForVfScaling\",\"metricsPerEventName\":[{\"eventName\":\"vFirewallBroadcastPackets\",\"controlLoopSchemaType\":\"VNF\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":300,\"direction\":\"LESS_OR_EQUAL\",\"severity\":\"MAJOR\",\"closedLoopEventStatus\":\"ONSET\"},{\"closedLoopControlName\":\"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":700,\"direction\":\"GREATER_OR_EQUAL\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]},{\"eventName\":\"vLoadBalancer\",\"controlLoopSchemaType\":\"VM\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":300,\"direction\":\"GREATER_OR_EQUAL\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]},{\"eventName\":\"Measurement_vGMUX\",\"controlLoopSchemaType\":\"VNF\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\",\"thresholdValue\":0,\"direction\":\"EQUAL\",\"severity\":\"MAJOR\",\"closedLoopEventStatus\":\"ABATED\"},{\"closedLoopControlName\":\"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\",\"thresholdValue\":0,\"direction\":\"GREATER\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]}]}", + "subscriberTopicName": "unauthenticated.VES_MEASUREMENT_OUTPUT", + "subscriberTimeoutMS": "-1", + "subscriberProtocol": "http", + "subscriberPollingInterval": "30000", + "subscriberMessageLimit": "-1", + "subscriberHostPort": "3904", + "subscriberHostName":"{{ mr_ip_addr }}", + "subscriberContentType": "application/json", + "subscriberConsumerId": "c12", + "subscriberConsumerGroup": "OpenDCAE-c12", + "publisherTopicName": "unauthenticated.DCAE_CL_OUTPUT", + "publisherProtocol": "http", + "publisherPollingInterval": "20000", + "publisherMaxRecoveryQueueSize": "100000", + "publisherMaxBatchSize": "1", + "publisherHostPort": "3904", + "publisherHostName": "{{ mr_ip_addr }}", + "publisherContentType": "application/json", + "enableAlertCEFFormat": "false", + "enableAAIEnrichment": true, + "aaiVNFEnrichmentAPIPath": "/aai/v11/network/generic-vnfs/generic-vnf", + "aaiVMEnrichmentAPIPath": "/aai/v11/search/nodes-query", + "aaiEnrichmentUserPassword": "DCAE", + "aaiEnrichmentUserName": "DCAE", + "aaiEnrichmentProtocol": "https", + "aaiEnrichmentPortNumber": "8443", + "aaiEnrichmentIgnoreSSLCertificateErrors": "true", + "aaiEnrichmentHost":"{{ aai1_ip_addr }}", + "enableRedisCaching":false +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ 
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcaegen2-analytics-tca:preferences" + + + +# SNMP Trap Collector +SERVICENAME="${SRVCNAME_STATIC_SNMPTRAP}" +REGKV='{ + "files": { + "roll_frequency": "day", + "data_dir": "data", + "arriving_traps_log": "snmptrapd_arriving_traps.log", + "minimum_severity_to_log": 2, + "traps_stats_log": "snmptrapd_stats.csv", + "perm_status_file": "snmptrapd_status.log", + "pid_dir": "tmp", + "eelf_audit": "audit.log", + "log_dir": "logs", + "eelf_metrics": "metrics.log", + "eelf_base_dir": "/opt/app/snmptrap/logs", + "runtime_base_dir": "/opt/app/snmptrap", + "eelf_error": "error.log", + "eelf_debug": "debug.log", + "snmptrapd_diag": "snmptrapd_prog_diag.log" + }, + "publisher": { + "http_milliseconds_between_retries": 750, + "max_milliseconds_between_publishes": 10000, + "max_traps_between_publishes": 10, + "http_retries": 3, + "http_primary_publisher": "true", + "http_milliseconds_timeout": 1500, + "http_peer_publisher": "unavailable" + }, + "snmptrapd": { + "version": "1.4.0", + "title": "Collector for receiving SNMP traps and publishing to DMAAP/MR" + }, + "cache": { + "dns_cache_ttl_seconds": 60 + }, + "sw_interval_in_seconds": 60, + "streams_publishes": { + "sec_fault_unsecure": { + "type": "message_router", + "dmaap_info": { + "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.ONAP-COLLECTOR-SNMPTRAP" + } + } + }, + "StormWatchPolicy": "", + "services_calls": {}, + "protocols": { + "ipv4_interface": "0.0.0.0", + "ipv4_port": 6162, + "ipv6_interface": "::1", + "ipv6_port": 6162 + } +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}" + + + +# hv-ves collector +SERVICENAME="${SRVCNAME_STATIC_HVVES}" +REGKV='{ + "dmaap.kafkaBootstrapServers": "{{ mr_ip_addr }}:9092", + "collector.routing": { + "fromDomain": "HVMEAS", + "toTopic": "HV_VES_MEASUREMENTS" + } +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}" + + +# data file collector +SERVICENAME="${SRVCNAME_STATIC_DFC}" + REGKV='{ + "dmaap.dmaapConsumerConfiguration.dmaapHostName": "{{ mr_ip_addr }}", + "dmaap.dmaapConsumerConfiguration.dmaapPortNumber": 2222, + "dmaap.dmaapConsumerConfiguration.dmaapTopicName": "/events/unauthenticated.VES_NOTIFICATION_OUTPUT", + "dmaap.dmaapConsumerConfiguration.dmaapProtocol": "http", + "dmaap.dmaapConsumerConfiguration.dmaapUserName": "", + "dmaap.dmaapConsumerConfiguration.dmaapUserPassword": "", + "dmaap.dmaapConsumerConfiguration.dmaapContentType": "application/json", + "dmaap.dmaapConsumerConfiguration.consumerId": "C12", + "dmaap.dmaapConsumerConfiguration.consumerGroup": "OpenDcae-c12", + "dmaap.dmaapConsumerConfiguration.timeoutMs": -1, + "dmaap.dmaapConsumerConfiguration.messageLimit": 1, + "dmaap.dmaapProducerConfiguration.dmaapHostName": "{{ mr_ip_addr }}", + "dmaap.dmaapProducerConfiguration.dmaapPortNumber": 3907, + "dmaap.dmaapProducerConfiguration.dmaapTopicName": "publish", + "dmaap.dmaapProducerConfiguration.dmaapProtocol": "https", + "dmaap.dmaapProducerConfiguration.dmaapUserName": "dradmin", + "dmaap.dmaapProducerConfiguration.dmaapUserPassword": "dradmin", + "dmaap.dmaapProducerConfiguration.dmaapContentType": "application/octet-stream", + "ftp.ftpesConfiguration.keyCert": "config/ftpKey.jks", + "ftp.ftpesConfiguration.keyPassword": "secret", + "ftp.ftpesConfiguration.trustedCA": "config/cacerts", + "ftp.ftpesConfiguration.trustedCAPassword": "secret" + }' +curl -v -X PUT -H 
"Content-Type: application/json" \ +--data "${REGKV}" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}" + + +# PNF Registration Handler +SERVICENAME="${SRVCNAME_STATIC_PRH}" +REGKV='{ + "dmaap.dmaapProducerConfiguration.dmaapTopicName": "/events/unauthenticated.PNF_READY", + "dmaap.dmaapConsumerConfiguration.dmaapHostName": "{{ mr_ip_addr }}", + "aai.aaiClientConfiguration.aaiPnfPath": "/network/pnfs/pnf", + "aai.aaiClientConfiguration.aaiUserPassword": "AAI", + "dmaap.dmaapConsumerConfiguration.dmaapUserName": "admin", + "aai.aaiClientConfiguration.aaiBasePath": "/aai/v12", + "dmaap.dmaapConsumerConfiguration.timeoutMs": -1, + "dmaap.dmaapProducerConfiguration.dmaapPortNumber": 3904, + "aai.aaiClientConfiguration.aaiHost": "{{ aai1_ip_addr }}", + "dmaap.dmaapConsumerConfiguration.dmaapUserPassword": "admin", + "dmaap.dmaapProducerConfiguration.dmaapProtocol": "http", + "aai.aaiClientConfiguration.aaiIgnoreSslCertificateErrors": true, + "dmaap.dmaapProducerConfiguration.dmaapContentType": "application/json", + "dmaap.dmaapConsumerConfiguration.dmaapTopicName": "/events/unauthenticated.VES_PNFREG_OUTPUT", + "dmaap.dmaapConsumerConfiguration.dmaapPortNumber": 3904, + "dmaap.dmaapConsumerConfiguration.dmaapContentType": "application/json", + "dmaap.dmaapConsumerConfiguration.messageLimit": -1, + "dmaap.dmaapConsumerConfiguration.dmaapProtocol": "http", + "aai.aaiClientConfiguration.aaiUserName": "AAI", + "dmaap.dmaapConsumerConfiguration.consumerId": "c12", + "dmaap.dmaapProducerConfiguration.dmaapHostName": "{{ mr_ip_addr }}", + "aai.aaiClientConfiguration.aaiHostPortNumber": 8443, + "dmaap.dmaapConsumerConfiguration.consumerGroup": "OpenDCAE-c12", + "aai.aaiClientConfiguration.aaiProtocol": "https", + "dmaap.dmaapProducerConfiguration.dmaapUserName": "admin", + "dmaap.dmaapProducerConfiguration.dmaapUserPassword": "admin" +}' +curl -v -X PUT -H "Content-Type: application/json" \ +--data "${REGKV}" \ +"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}" diff --git a/archive/heat/setup.sh b/archive/heat/setup.sh new file mode 100755 index 0000000..b95e56e --- /dev/null +++ b/archive/heat/setup.sh @@ -0,0 +1,142 @@ +#!/bin/bash +############################################################################# +# +# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +############################################################################# + + +NETWORK="config_default" + +echo "Cleaning up any previously deployed Cloudify Manager and registrator" +docker stop registrator cloudify-manager +docker rm registrator cloudify-manager + +echo "Launching registrator on dockerhost" +docker run -d \ +--network=${NETWORK} \ +--name=registrator \ +-e EXTERNAL_IP={{ dcae_ip_addr }} \ +-e CONSUL_HOST=consul \ +-v /var/run/docker.sock:/tmp/docker.sock \ +onapdcae/registrator:v7 + + + + +rm -rf scripts-in-container +mkdir scripts-in-container +cat > scripts-in-container/install-plugins.sh << EOL +#!/bin/bash +source /cfy42/bin/activate +pip install pip==9.0.3 +cfy profiles use 127.0.0.1 -u admin -p admin -t default_tenant +cfy status +cd /tmp/bin +./build-plugins.sh https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R4 https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases +for wagon in ./wagons/*.wgn; do cfy plugins upload \$wagon ; done +deactivate +EOL + +#wget -O scripts-in-container/build-plugins.sh https://git.onap.org/dcaegen2/deployments/plain/k8s-bootstrap-container/build-plugins.sh +cat > scripts-in-container/build-plugins.sh << EOL +#!/bin/bash + +# Pull plugin archives from repos +# Build wagons +# $1 is the DCAE repo URL +# $2 is the CCSDK repo URL +# (This script runs at Docker image build time) +# +set -x +DEST=wagons + +# For DCAE, we get zips of the archives and build wagons +DCAEPLUGINFILES=\ +"\ +relationshipplugin/1.0.0/relationshipplugin-1.0.0.tgz +dcaepolicyplugin/2.3.0/dcaepolicyplugin-2.3.0.tgz +dockerplugin/3.2.1/dockerplugin-3.2.1.tgz \ +" + +# For CCSDK, we pull down the wagon files directly +CCSDKPLUGINFILES=\ +"\ +plugins/pgaas-1.1.0-py27-none-any.wgn +plugins/sshkeyshare-1.0.0-py27-none-any.wgn +" + +# Build a set of wagon files from archives in a repo +# $1 -- repo base URL +# $2 -- list of paths to archive files in the repo +function build { + for plugin in $2 + do + # Could just do wagon create with the archive URL as source, + # but can't use a requirements file with that approach + mkdir work + target=$(basename ${plugin}) + curl -Ss $1/${plugin} > ${target} + tar zxvf ${target} --strip-components=2 -C work + wagon create -t tar.gz -o ${DEST} -r work/requirements.txt --validate ./work + rm -rf work + done +} + +# Copy a set of wagons from a repo +# $1 -- repo base URL +# $2 -- list of paths to wagons in the repo +function get_wagons { + for wagon in $2 + do + target=$(basename ${wagon}) + curl -Ss $1/${wagon} > ${DEST}/${target} + done +} + +mkdir ${DEST} +build $1 "${DCAEPLUGINFILES}" +get_wagons $2 "${CCSDKPLUGINFILES}" +EOL + +chmod 777 scripts-in-container/* + +echo "Launching Cloudify Manager container" +docker run -d \ +--network="${NETWORK}" \ +--name cloudify-manager \ +--restart unless-stopped \ +-v /sys/fs/cgroup:/sys/fs/cgroup:ro \ +-v /opt/app/config/scripts-in-container:/tmp/bin \ +-p 80:80 \ +--tmpfs /run \ +--tmpfs /run/lock \ +--security-opt seccomp:unconfined \ +--cap-add SYS_ADMIN \ +--label "SERVICE_80_NAME=cloudify_manager" \ +--label "SERVICE_80_CHECK_TCP=true" \ +--label "SERVICE_80_CHECK_INTERVAL=15s" \ +--label "SERVICE_80_CHECK_INITIAL_STATUS=passing" \ +{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.cm-container:{{ dcae_docker_cm }} + +echo "Cloudify Manager deployed, waiting for completion" +while !
nc -z localhost 80; do sleep 1; done + +echo "Upload plugins to Cloudify Manager" + +# run as detached because this script is intended to be run in background +docker exec -itd cloudify-manager /tmp/bin/install-plugins.sh + +echo "Cloudify Manager setup complete" + diff --git a/archive/heat/teardown.sh b/archive/heat/teardown.sh new file mode 100755 index 0000000..19d74a7 --- /dev/null +++ b/archive/heat/teardown.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +############################################################################# +# +# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +############################################################################# + + +cd /opt/app/config + +echo "Stop and remove cloudify-manager registrator dcae-health" +docker stop cloudify-manager registrator dcae-health +docker rm cloudify-manager registrator dcae-health + +echo "Stand down R2PLUS service components" +/opt/docker/docker-compose -f ./docker-compose-4.yaml down +echo "Stand down R2 platform components" +/opt/docker/docker-compose -f ./docker-compose-3.yaml down +echo "Stand down R2 minimum service components" +/opt/docker/docker-compose -f ./docker-compose-2.yaml down +echo "Stand down R2 shared platform components" +/opt/docker/docker-compose -f ./docker-compose-1.yaml down +echo "Teardown done" diff --git a/bootstrap/Dockerfile-template b/bootstrap/Dockerfile-template deleted file mode 100644 index 531939a..0000000 --- a/bootstrap/Dockerfile-template +++ /dev/null @@ -1,20 +0,0 @@ -FROM ubuntu:16.04 -MAINTAINER maintainer -ENV INSROOT /opt/app -ENV APPUSER installer -RUN apt-get update\ - && apt-get install -y iputils-ping wget python-virtualenv python-pip ssh ed curl uuid-runtime netcat\ - && apt-get clean\ - && pip install --upgrade pip\ - && mkdir -p ${INSROOT}/${APPUSER}/blueprints\ - && useradd -d ${INSROOT}/${APPUSER} ${APPUSER} -COPY installer-docker.sh ${INSROOT}/${APPUSER}/installer -COPY teardown.sh ${INSROOT}/${APPUSER}/teardown -# COPY *.yaml ${INSROOT}/${APPUSER}/blueprints/ -RUN wget -P ${INSROOT}/${APPUSER}/blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/centos_vm.yaml -RUN wget -P ${INSROOT}/${APPUSER}/blueprints/ {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2_platform_blueprints_releases }}/blueprints/consul_cluster.yaml -WORKDIR ${INSROOT}/${APPUSER} -RUN chown -R ${APPUSER}:${APPUSER} ${INSROOT}/${APPUSER} && chmod +x ${INSROOT}/${APPUSER}/installer && chmod +x ${INSROOT}/${APPUSER}/teardown -USER ${APPUSER} -ENTRYPOINT exec "${INSROOT}/${APPUSER}/installer" - diff --git a/bootstrap/README-docker.md b/bootstrap/README-docker.md deleted file mode 100644 index 7e3dedc..0000000 --- a/bootstrap/README-docker.md +++ /dev/null @@ -1,150 +0,0 @@ -## Dockerized bootstrap for Cloudify Manager and Consul cluster -1. 
Preparations - - a) The current DCAEGEN2 bootstrapping process assumes that the networking in OpenStack is based on the following model: - - a private network interconnecting the VMs; and an external network that provides "floating" IP addresses for the VMs. A router connects the two networks. Each VM is assigned two IP addresses, one allocated from the private network when the VM is launched. -Then a floating IP is assigned to the VM from the external network. The UUIDs of the private and external networks are needed for preparing the inputs.yaml file used to run the bootstrap container. - - b) Add a public key to OpenStack and note its name (we will use KEYNAME as an example below). Save the private key (we will use KEYPATH as an example for its path) and make sure its permissions are globally readable. - - c) Load the following base VM images into OpenStack: a CentOS 7 base image and an Ubuntu 16.04 base image. - - d) Obtain the resource IDs/UUIDs for resources needed by the inputs.yaml file, as explained below, from OpenStack. - -2. On the dev machine, set up a directory to hold environment-specific configuration files. Call its path CONFIGDIR. - -3. Put the private key mentioned above into CONFIGDIR as a file named `key`, and make it globally readable. -4. Create a file named `inputs.yaml` in CONFIGDIR: - -``` -1 centos7image_id: '7c8d7524-de1f-490b-8418-db294bfa2d65' -2 ubuntu1604image_id: '4b09c18b-d69e-4ba8-a1bd-562cab91ff20' -3 flavor_id: '4' -4 security_group: '55a11193-6559-4f6c-b2d2-0119a9817062' -5 public_net: 'admin_floating_228_net' -6 private_net: 'onap-f-net' -7 openstack: -8 username: 'MY_LOGIN' -9 password: 'MY_PASSWORD' -10 tenant_name: 'TENANT_NAME' -11 auth_url: 'KEYSTONE_AUTH_URL' -12 region: 'RegionOne' -13 keypair: 'KEYNAME' -14 key_filename: '/opt/dcae/key' -15 location_prefix: 'onapr1' -16 location_domain: 'onapdevlab.onap.org' -17 codesource_url: 'https://nexus.onap.org/service/local/repositories/raw/content' -18 codesource_version: 'org.onap.dcaegen2.deployments/releases/scripts' -``` -Here is a line-by-line explanation of the parameters: - 1. UUID of the OpenStack CentOS 7 VM image - 2. UUID of the OpenStack Ubuntu 16.04 VM image - 3. ID of the OpenStack VM flavor to be used by DCAEGEN2 VMs - 4. UUID of the OpenStack security group to be used for DCAEGEN2 VMs - 5. The name of the OpenStack network where public IP addresses are allocated from - 6. The name of the OpenStack network where private IP addresses are allocated from - 7. Group header for OpenStack Keystone parameters - 8. User name - 9. Password - 10. Name of the OpenStack tenant/project where DCAEGEN2 VMs are deployed - 11. OpenStack authentication API URL, for example 'https://horizon.playground.onap.org:5000/v2.0' - 12. Name of the OpenStack region where DCAEGEN2 VMs are deployed, for example 'RegionOne' - 13. Name of the public key uploaded to OpenStack in the Preparation step - 14. Path to the private key within the container (!! Do not change !!) - 15. Prefix (location code) of all DCAEGEN2 VMs - 16. Domain name of the OpenStack tenant, for example 'onapr1.playground.onap.org' - 17. Location of the raw artifact repo hosting additional boot scripts called by DCAEGEN2 VMs' cloud-init, for example: - 'https://nexus.onap.org/service/local/repositories/raw/content' - 18. Path to the boot scripts within the raw artifact repo, for example: 'org.onap.dcaegen2.deployments/releases/scripts' - - -5. Create a file in CONFIGDIR called `invinputs.yaml`. This contains environment-specific information for the inventory service.
(TODO: examples only, not the correct values for the ONAP integration environment.) - -``` -1 docker_host_override: "platform_dockerhost" -2 asdc_address: "sdc.onap.org:8443" -3 asdc_uri: "https://sdc.onap.org:8443" -4 asdc_user: "ci" -5 asdc_password: !!str 123456 -6 asdc_environment_name: "ONAP-AMDOCS" -7 postgres_user_inventory: "postgres" -8 postgres_password_inventory: "onap123" -9 service_change_handler_image: "nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.servicechange-handler:latest" -10 inventory_image: "nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.inventory-api:latest" -``` -Here is a line-by-line description of the parameters: - 1. The service name for the platform docker host (should be the same in all environments) - 2. The hostname and port of the SDC service - 3. The URI of the SDC service - 4. The SDC username - 5. The SDC password - 6. The SDC environment name - 7. The Postgres user name - 8. The Postgres password - 9. The Docker image to be used for the service change handler (should be the same in all environments) - 10. The Docker image to be used for the inventory service (should be the same in all environments) - -6. Create a file in CONFIGDIR called `phinputs.yaml`. This contains environment-specific information for the policy handler. - -``` -application_config: - policy_handler : - # parallelize the getConfig queries to policy-engine on each policy-update notification - thread_pool_size : 4 - - # parallelize requests to policy-engine and keep them alive - pool_connections : 20 - - # retry to getConfig from policy-engine on policy-update notification - policy_retry_count : 5 - policy_retry_sleep : 5 - - # policy-engine config - # These are the url of and the auth for the external system, namely the policy-engine (PDP). - # We obtain that info manually from PDP folks at the moment. - # In the long run we should figure out a way of bringing that info into the consul record - # related to policy-engine itself. - policy_engine : - url : "https://policy-engine.onap.org:8081" - path_decision : "/decision/v1" - path_pdp : "/pdp/" - path_api : "/pdp/api/" - headers : - Accept : "application/json" - "Content-Type" : "application/json" - ClientAuth : "Basic bTAzOTQ5OnBvbGljeVIwY2sk" - Authorization : "Basic dGVzdHBkcDphbHBoYTEyMw==" - Environment : "TEST" - target_entity : "policy_engine" - # deploy_handler config - # changed from string "deployment_handler" in 2.3.1 to structure in 2.4.0 - deploy_handler : - # name of deployment-handler service used by policy-handler for logging - target_entity : "deployment_handler" - # url of the deployment-handler service for policy-handler to direct the policy-updates to - # - expecting dns to resolve the hostname deployment-handler to ip address - url : "http://deployment-handler:8188" - # limit the size of a single data segment for policy-update messages - # from policy-handler to deployment-handler in megabytes - max_msg_length_mb : 5 - query : - # optionally specify the tenant name for the cloudify under deployment-handler - # if not specified the "default_tenant" is used by the deployment-handler - cfy_tenant_name : "default_tenant" -``` -TODO: provide explanations - -7.
Pull and run the docker container -``` -docker login -u docker -p docker nexus3.onap.org:10001 -docker pull nexus3.onap.org:10001/onap/org.onap.dcaegen2.deployments.bootstrap:1.1-latest -docker run -d --name boot -v CONFIGDIR:/opt/app/installer/config -e "LOCATION=dg2" nexus3.onap.org:10003/onap/org.onap.dcaegen2.deployments.bootstrap:1.1-latest -``` -The container stays up even after the installation is complete. Use the docker exec command to get inside the container, then run cfy commands to interact with the Cloudify Manager. - -8. To tear down all of the DCAE installation: - -``` -docker exec -it boot ./teardown -``` diff --git a/bootstrap/installer-docker.sh-template b/bootstrap/installer-docker.sh-template deleted file mode 100755 index 1364dc1..0000000 --- a/bootstrap/installer-docker.sh-template +++ /dev/null @@ -1,470 +0,0 @@ -#!/bin/bash -# -# ============LICENSE_START========================================== -# =================================================================== -# Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved. -# =================================================================== -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied -# See the License for the specific language governing permissions and -# limitations under the License. -# ============LICENSE_END============================================ -# - -# URLs for artifacts needed for installation -DESIGTYPES=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/type_files/dnsdesig/dns_types.yaml -DESIGPLUG=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/dnsdesig-1.0.0-py27-none-any.wgn -SSHKEYTYPES=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/type_files/sshkeyshare/sshkey_types.yaml -SSHKEYPLUG=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/sshkeyshare-1.0.0-py27-none-any.wgn -OSPLUGINZIP=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/archive/1.4.zip -OSPLUGINWGN=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/releases/download/2.2.0/cloudify_openstack_plugin-2.2.0-py27-none-linux_x86_64-centos-Core.wgn - -PLATBPSRC=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.blueprints/releases/blueprints -DOCKERBP=DockerBP.yaml -CBSBP=config_binding_service.yaml -PGBP=pgaas-onevm.yaml -CDAPBP=cdapbp7.yaml -CDAPBROKERBP=cdap_broker.yaml -INVBP=inventory.yaml -DHBP=DeploymentHandler.yaml -PHBP=policy_handler.yaml -VESBP=ves.yaml -TCABP=tca.yaml -HRULESBP=holmes-rules.yaml -HENGINEBP=holmes-engine.yaml -PRHBP=prh.yaml -HVVESBP=hv-ves.yaml - -DOCKERBPURL="${PLATBPSRC}/${DOCKERBP}" -CBSBPURL="${PLATBPSRC}/${CBSBP}" -PGBPURL="${PLATBPSRC}/${PGBP}" -CDAPBPURL="${PLATBPSRC}/${CDAPBP}" -CDAPBROKERBPURL="${PLATBPSRC}/${CDAPBROKERBP}" -INVBPURL="${PLATBPSRC}/${INVBP}" -DHBPURL="${PLATBPSRC}/${DHBP}" -PHBPURL="${PLATBPSRC}/${PHBP}" -VESBPURL="${PLATBPSRC}/${VESBP}" -TCABPURL="${PLATBPSRC}/${TCABP}"
-HRULESBPURL="${PLATBPSRC}/${HRULESBP}" -HENGINEBPURL="${PLATBPSRC}/${HENGINEBP}" -PRHBPURL="${PLATBPSRC}/${PRHBP}" -HVVESBPURL="${PLATBPSRC}/${HVVESBP}" - -LOCATIONID=$(printenv LOCATION) - -# Make sure ssh doesn't prompt for new host or choke on a new host with an IP it's seen before -SSHOPTS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" -STARTDIR=$(pwd) - -# clear out files for writing out floating IP addresses -rm -f "$STARTDIR"/config/runtime.ip.consul -rm -f "$STARTDIR"/config/runtime.ip.cm - - -SSHUSER=centos -PVTKEY=./config/key -INPUTS=./config/inputs.yaml - -if [ "$LOCATION" = "" ] -then - echo 'Environment variable LOCATION not set. Should be set to location ID for this installation.' - exit 1 -fi - -set -e -set -x - -# Docker workaround for SSH key -# In order for the container to be able to access the key when it's mounted from the Docker host, -# the key file has to be world-readable. But ssh itself will not work with a private key that's world readable. -# So we make a copy and change permissions on the copy. -# NB -- the key on the Docker host has to be world-readable, which means that, from the host machine, you -# can't use it with ssh. It needs to be a world-readable COPY. -PVTKEY=./key600 -cp ./config/key ${PVTKEY} -chmod 600 ${PVTKEY} - -# Create a virtual environment -virtualenv dcaeinstall -source dcaeinstall/bin/activate - -# forcing pip version (pip>=10.0.0 no longer support use wheel) -pip install pip==9.0.3 - -# Install Cloudify -pip install cloudify==3.4.0 - -# Install the Cloudify OpenStack plugin -wget -qO- ${OSPLUGINZIP} > openstack.zip -pip install openstack.zip - -# Spin up a VM - -# Get the Designate and SSH key type files and plugins -mkdir types -wget -qO- ${DESIGTYPES} > types/dns_types.yaml -wget -qO- ${SSHKEYTYPES} > types/sshkey_types.yaml - -wget -O dnsdesig.wgn ${DESIGPLUG} -wget -O sshkeyshare.wgn ${SSHKEYPLUG} - -wagon install -s dnsdesig.wgn -wagon install -s sshkeyshare.wgn - -## Fix up the inputs file to get the private key locally -sed -e "s#key_filename:.*#key_filename: $PVTKEY#" < ${INPUTS} > /tmp/local_inputs - -# Now install the VM -# Don't exit on error after this point--keep container running so we can do uninstalls after a failure -set +e -if wget -O /tmp/centos_vm.yaml "${PLATBPSRC}"/centos_vm.yaml; then - mv -f /tmp/centos_vm.yaml ./blueprints/ - echo "Succeeded in getting the newest centos_vm.yaml" -else - echo "Failed to update centos_vm.yaml, using default version" - rm -f /tmp/centos_vm.yaml -fi -set -e -cfy local init --install-plugins -p ./blueprints/centos_vm.yaml -i /tmp/local_inputs -i "datacenter=$LOCATION" -cfy local execute -w install --task-retries=10 -PUBIP=$(cfy local outputs | grep -Po '"public_ip": "\K.*?(?=")') - -# wait till the cloudify manager's sshd ready -while ! nc -z -v -w5 ${PUBIP} 22; do echo "."; done -sleep 10 - -echo "Installing Cloudify Manager on ${PUBIP}." 
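# (A reusable variant of the nc polling loop above -- an editor's sketch, not
# part of the original template; "wait_for_port" is an illustrative name.)
# wait_for_port() {
#   local host=$1 port=$2
#   until nc -z -w5 "$host" "$port"; do echo "."; sleep 5; done
# }
# Usage, equivalent to the wait just performed: wait_for_port "$PUBIP" 22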
-PVTIP=$(ssh $SSHOPTS -i "$PVTKEY" "$SSHUSER"@"$PUBIP" 'echo PVTIP=`curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4`' | grep PVTIP | sed 's/PVTIP=//') -if [ "$PVTIP" = "" ] -then - echo Cannot access specified machine at $PUBIP using supplied credentials - exit -fi - - -# Copy private key onto Cloudify Manager VM -PVTKEYPATH=$(cat ${INPUTS} | grep "key_filename" | cut -d "'" -f2) -PVTKEYNAME=$(basename $PVTKEYPATH) -PVTKEYDIR=$(dirname $PVTKEYPATH) -scp $SSHOPTS -i $PVTKEY $PVTKEY $SSHUSER@$PUBIP:/tmp/$PVTKEYNAME -ssh -t $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mkdir -p $PVTKEYDIR -ssh -t $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/$PVTKEYNAME $PVTKEYPATH - -ESMAGIC=$(uuidgen -r) -WORKDIR=$HOME/cmtmp -BSDIR=$WORKDIR/cmbootstrap -PVTKEY2=$BSDIR/id_rsa.cfybootstrap -TMPBASE=$WORKDIR/tmp -TMPDIR=$TMPBASE/lib -SRCS=$WORKDIR/srcs.tar -TOOL=$WORKDIR/tool.py -rm -rf $WORKDIR -mkdir -p $BSDIR $TMPDIR/cloudify/wheels $TMPDIR/cloudify/sources $TMPDIR/manager -chmod 700 $WORKDIR -cp "$PVTKEY" $PVTKEY2 -cat >$TOOL </root/.virtualenv/virtualenv.ini; echo no-download=true >>/root/.virtualenv/virtualenv.ini"' - -# Gather installation artifacts -# from documentation, URL for manager blueprints archive -BSURL=https://github.com/cloudify-cosmo/cloudify-manager-blueprints/archive/3.4.tar.gz -BSFILE=$(basename $BSURL) - -umask 022 -wget -qO- $BSURL >$BSDIR/$BSFILE -cd $BSDIR -tar xzvf $BSFILE -MRPURL=$(python $TOOL $BSDIR/cloudify-manager-blueprints-3.4) -MRPFILE=$(basename $MRPURL) -wget -qO- $MRPURL >$TMPDIR/cloudify/sources/$MRPFILE - -tar cf $SRCS -C $TMPDIR cloudify -rm -rf $TMPBASE -# -# Load required package files onto VM -# -scp $SSHOPTS -i $PVTKEY2 $SRCS $SSHUSER@$PUBIP:/tmp/. -ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "cd /opt; tar xf /tmp/srcs.tar; chown -R root:root /opt/cloudify /opt/manager; rm -rf /tmp/srcs.tar"' -# -# Install config file -- was done by DCAE controller. What now? -# -ssh $SSHOPTS -t -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc '"'"'mkdir -p /opt/dcae; if [ -f /tmp/cfy-config.txt ]; then cp /tmp/cfy-config.txt /opt/dcae/config.txt && chmod 644 /opt/dcae/config.txt; fi'"'" -cd $WORKDIR - -# -# Check for and set up https certificate information -# -rm -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt -ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "openssl pkcs12 -in /opt/app/dcae-certificate/certificate.pkcs12 -passin file:/opt/app/dcae-certificate/.password -nodes -chain"' | awk 'BEGIN{x="/dev/null";}/-----BEGIN CERTIFICATE-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.crt";}/-----BEGIN PRIVATE KEY-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.key";}{print >x;}/-----END /{x="/dev/null";}' -USESSL=false -if [ -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key -a -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt ] -then - USESSL=true -fi -# -# Set up configuration for the bootstrap -# -export CLOUDIFY_USERNAME=admin CLOUDIFY_PASSWORD=encc0fba9f6d618a1a51935b42342b17658 -cd $BSDIR/cloudify-manager-blueprints-3.4 -cp simple-manager-blueprint.yaml bootstrap-blueprint.yaml -ed bootstrap-blueprint.yaml <<'!EOF' -/^node_types:/-1a - plugin_resources: - description: > - Holds any archives that should be uploaded to the manager. - default: [] - dsl_resources: - description: > - Holds a set of dsl required resources - default: [] -. 
-/^ upload_resources:/a - plugin_resources: { get_input: plugin_resources } -. -w -q -!EOF - -sed bootstrap-inputs.yaml \ - -e "s;.*public_ip: .*;public_ip: '$PUBIP';" \ - -e "s;.*private_ip: .*;private_ip: '$PVTIP';" \ - -e "s;.*ssh_user: .*;ssh_user: '$SSHUSER';" \ - -e "s;.*ssh_key_filename: .*;ssh_key_filename: '$PVTKEY2';" \ - -e "s;.*elasticsearch_java_opts: .*;elasticsearch_java_opts: '-Des.cluster.name=$ESMAGIC';" \ - -e "/ssl_enabled: /s/.*/ssl_enabled: $USESSL/" \ - -e "/security_enabled: /s/.*/security_enabled: $USESSL/" \ - -e "/admin_password: /s/.*/admin_password: '$CLOUDIFY_PASSWORD'/" \ - -e "/admin_username: /s/.*/admin_username: '$CLOUDIFY_USERNAME'/" \ - -e "s;.*manager_resources_package: .*;manager_resources_package: 'http://169.254.169.254/nosuchthing/$MRPFILE';" \ - -e "s;.*ignore_bootstrap_validations: .*;ignore_bootstrap_validations: true;" \ - -# Add plugin resources -# TODO Maintain plugin list as updates/additions occur -cat >>bootstrap-inputs.yaml <<'!EOF' -plugin_resources: - - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-openstack-plugin/1.4/cloudify_openstack_plugin-1.4-py27-none-linux_x86_64-centos-Core.wgn' - - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-fabric-plugin/1.4.1/cloudify_fabric_plugin-1.4.1-py27-none-linux_x86_64-centos-Core.wgn' - - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/dnsdesig-1.0.0-py27-none-any.wgn' - - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/sshkeyshare-1.0.0-py27-none-any.wgn' - - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/pgaas-1.0.0-py27-none-any.wgn' - - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/cdapcloudify/cdapcloudify-14.2.5-py27-none-any.wgn' - - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/dcaepolicyplugin/dcaepolicyplugin-1.0.0-py27-none-any.wgn' - - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/dockerplugin/dockerplugin-2.4.0-py27-none-any.wgn' - - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/relationshipplugin/relationshipplugin-1.0.0-py27-none-any.wgn' -!EOF -# -# And away we go -# -cfy init -r -cfy bootstrap --install-plugins -p bootstrap-blueprint.yaml -i bootstrap-inputs.yaml -rm -f resources/ssl/server.key - -# Install Consul VM via a blueprint -cd $STARTDIR -mkdir consul -cd consul -cfy init -r -cfy use -t ${PUBIP} -echo "Deploying Consul VM" - -set +e -if wget -O /tmp/consul_cluster.yaml "${PLATBPSRC}"/consul_cluster.yaml; then - mv -f /tmp/consul_cluster.yaml ../blueprints/ - echo "Succeeded in getting the newest consul_cluster.yaml" -else - echo "Failed to update consul_cluster.yaml, using default version" - rm -f /tmp/consul_cluster.yaml -fi -set -e -cfy install -p ../blueprints/consul_cluster.yaml -d consul -i ../${INPUTS} -i "datacenter=$LOCATION" - -# Get the floating IP for one member of the cluster -# Needed for instructing the Consul agent on CM host to join the cluster -CONSULIP=$(cfy deployments outputs -d consul | grep -Po 'Value: \K.*') -echo Consul deployed at $CONSULIP - -# Wait for Consul API to come up -until curl http://$CONSULIP:8500/v1/agent/services -do - echo 
Waiting for Consul API - sleep 60 -done - -# Wait for a leader to be elected -until [[ "$(curl -Ss http://$CONSULIP:8500/v1/status/leader)" != '""' ]] -do - echo Waiting for leader - sleep 30 -done - -# Instruct the client-mode Consul agent running on the CM to join the cluster -curl http://$PUBIP:8500/v1/agent/join/$CONSULIP - -# Register Cloudify Manager in Consul via the local agent on the CM host - -REGREQ=" -{ - \"Name\" : \"cloudify_manager\", - \"ID\" : \"cloudify_manager\", - \"Tags\" : [\"http://${PUBIP}/api/v2.1\"], - \"Address\": \"${PUBIP}\", - \"Port\": 80, - \"Check\" : { - \"Name\" : \"cloudify_manager_health\", - \"Interval\" : \"300s\", - \"HTTP\" : \"http://${PUBIP}/api/v2.1/status\", - \"Status\" : \"passing\", - \"DeregisterCriticalServiceAfter\" : \"30m\" - } -} -" - -curl -X PUT -H 'Content-Type: application/json' --data-binary "$REGREQ" http://$PUBIP:8500/v1/agent/service/register -# Make Consul address available to plugins on Cloudify Manager -# TODO probably not necessary anymore -ENVINI=$(mktemp) -cat <<!EOF > $ENVINI -[$LOCATION] -CONSUL_HOST=$CONSULIP -CONFIG_BINDING_SERVICE=config_binding_service -!EOF -scp $SSHOPTS -i ../$PVTKEY $ENVINI $SSHUSER@$PUBIP:/tmp/env.ini -ssh -t $SSHOPTS -i ../$PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/env.ini /opt/env.ini -rm $ENVINI - - -##### INSTALLATION OF PLATFORM COMPONENTS - -# Get component blueprints -wget -P ./blueprints/docker/ ${DOCKERBPURL} -wget -P ./blueprints/cbs/ ${CBSBPURL} -wget -P ./blueprints/pg/ ${PGBPURL} -wget -P ./blueprints/cdap/ ${CDAPBPURL} -wget -P ./blueprints/cdapbroker/ ${CDAPBROKERBPURL} -wget -P ./blueprints/inv/ ${INVBPURL} -wget -P ./blueprints/dh/ ${DHBPURL} -wget -P ./blueprints/ph/ ${PHBPURL} -wget -P ./blueprints/ves/ ${VESBPURL} -wget -P ./blueprints/tca/ ${TCABPURL} -wget -P ./blueprints/hrules/ ${HRULESBPURL} -wget -P ./blueprints/hengine/ ${HENGINEBPURL} -wget -P ./blueprints/prh/ ${PRHBPURL} -wget -P ./blueprints/hv-ves/ ${HVVESBPURL} - - -# Set up the credentials for access to the Docker registry -curl -X PUT -H "Content-Type: application/json" --data-binary '[{"username":"docker", "password":"docker", "registry": "nexus3.onap.org:10001"}]' http://${CONSULIP}:8500/v1/kv/docker_plugin/docker_logins - -# Install platform Docker host -# Note we're still in the "consul" directory, which is init'ed for talking to CM - -set +e -# Docker host for platform containers -cfy install -v -p ./blueprints/docker/${DOCKERBP} -b DockerBP -d DockerPlatform -i ../${INPUTS} -i "registered_dockerhost_name=platform_dockerhost" -i "registrator_image=onapdcae/registrator:v7" -i "location_id=${LOCATION}" -i "node_name=dokp00" -i "target_datacenter=${LOCATION}" - -# Docker host for service containers -cfy deployments create -b DockerBP -d DockerComponent -i ../${INPUTS} -i "registered_dockerhost_name=component_dockerhost" -i "location_id=${LOCATION}" -i "registrator_image=onapdcae/registrator:v7" -i "node_name=doks00" -i "target_datacenter=${LOCATION}" -cfy executions start -d DockerComponent -w install - -# wait for the extended platform VMs to settle -#sleep 180 - - -# CDAP cluster -cfy install -p ./blueprints/cdap/${CDAPBP} -b cdapbp7 -d cdap7 -i ../config/cdapinputs.yaml -i "location_id=${LOCATION}" - -# config binding service -cfy install -p ./blueprints/cbs/${CBSBP} -b config_binding_service -d config_binding_service -i "location_id=${LOCATION}" - - -# Postgres -cfy install -p ./blueprints/pg/${PGBP} -b pgaas -d pgaas -i ../${INPUTS} - - -# Inventory -cfy install -p ./blueprints/inv/${INVBP} -b PlatformServicesInventory -d
PlatformServicesInventory -i "location_id=${LOCATION}" -i ../config/invinputs.yaml - - -# Deployment Handler DH -cat >../dhinputs < "$STARTDIR"/config/runtime.ip.consul -echo "$PUBIP" > "$STARTDIR"/config/runtime.ip.cm - - -# Keep the container up -rm -f /tmp/ready_to_exit -while [ ! -e /tmp/ready_to_exit ] -do - sleep 30 -done diff --git a/bootstrap/pom.xml b/bootstrap/pom.xml deleted file mode 100644 index d2965e9..0000000 --- a/bootstrap/pom.xml +++ /dev/null @@ -1,173 +0,0 @@ - - - - 4.0.0 - - org.onap.dcaegen2.deployments - deployments - 1.2.0-SNAPSHOT - - org.onap.dcaegen2.deployments - bootstrap - dcaegen2-deployments-bootstrap - 1.2.0-SNAPSHOT - http://maven.apache.org - - UTF-8 - true - . - - - - - py - Python - **/*.py - - - - - ${project.artifactId}-${project.version} - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - clean phase script - clean - - exec - - - - ${project.artifactId} - clean - - - - - generate-sources script - generate-sources - - exec - - - - ${project.artifactId} - generate-sources - - - - - compile script - compile - - exec - - - - ${project.artifactId} - compile - - - - - package script - package - - exec - - - - ${project.artifactId} - package - - - - - test script - test - - exec - - - - ${project.artifactId} - test - - - - - install script - install - - exec - - - - ${project.artifactId} - install - - - - - deploy script - deploy - - exec - - - - ${project.artifactId} - deploy - - - - - - - - diff --git a/bootstrap/teardown.sh b/bootstrap/teardown.sh deleted file mode 100755 index eb7ed61..0000000 --- a/bootstrap/teardown.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -# -# ============LICENSE_START========================================== -# =================================================================== -# Copyright © 2017 AT&T Intellectual Property. All rights reserved. -# =================================================================== -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied -# See the License for the specific language governing permissions and -# limitations under the License. -# ============LICENSE_END============================================ -# -# ECOMP and OpenECOMP are trademarks -# and service marks of AT&T Intellectual Property. -# -set -x -set -e - -rm -f /tmp/ready_to_exit - -source ./dcaeinstall/bin/activate -cd ./consul -cfy status -set +e -cfy uninstall -d hengine -cfy uninstall -d hrules -cfy uninstall -d tca -cfy uninstall -d ves -cfy uninstall -d cdapbroker -cfy uninstall -d cdap7 -cfy uninstall -d policy_handler -cfy uninstall -d DeploymentHandler -cfy uninstall -d PlatformServicesInventory -cfy uninstall -d pgaas -cfy uninstall -d config_binding_service -cfy executions start -w uninstall -d DockerComponent -cfy deployments delete -d DockerComponent -cfy uninstall -d DockerPlatform -cfy uninstall -d consul -cd .. 
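# Back at the top level, "cfy local uninstall" (next) removes what "cfy local" created during bootstrap: the Cloudify Manager host VM itself.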
-cfy local uninstall - -touch /tmp/ready_to_exit diff --git a/cloud_init/cdap-init.sh b/cloud_init/cdap-init.sh deleted file mode 100644 index d9df3ba..0000000 --- a/cloud_init/cdap-init.sh +++ /dev/null @@ -1,387 +0,0 @@ -# ============LICENSE_START==================================================== -# org.onap.dcae -# ============================================================================= -# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved. -# ============================================================================= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============LICENSE_END====================================================== - -set -x -# -# get configuration -# -CODE_SOURCE=$1 -CODE_VERSION=$2 -CLUSTER_INDEX=$3 -CLUSTER_SIZE=$4 -CLUSTER_FQDNS=$5 -CLUSTER_LOCAL_IPS=$6 -CLUSTER_FLOATING_IPS=$7 -DATACENTER=$8 -REGISTERED_NAME=$9 -export JAVA_HOME=/usr/lib/jvm/default-java -md5sum /root/.sshkey/id_rsa | awk '{ print $1 }' >/root/.mysqlpw -chmod 400 /root/.mysqlpw -# -# enable outside apt repositories -# -wget -qO- http://public-repo-1.hortonworks.com/HDP/ubuntu16/2.x/updates/2.6.0.3/hdp.list >/etc/apt/sources.list.d/hdp.list -wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/cask.list >/etc/apt/sources.list.d/cask.list -wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/pubkey.gpg | apt-key add - -apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 B9733A7A07513CAD -apt-get update -# -# install software from apt repositories -# -apt-get install -y default-jdk hadoop-hdfs hadoop-mapreduce hive hbase libsnappy-dev liblzo2-dev hadooplzo spark-master spark-python zip unzip -usermod -a -G hadoop hive -if [ $CLUSTER_INDEX -lt 3 ] -then - apt-get install -y zookeeper-server - cat <<!EOF >>/etc/zookeeper/conf/zookeeper-env.sh -export JAVA_HOME=/usr/lib/jvm/default-java -export ZOOCFGDIR=/etc/zookeeper/conf -export ZOO_LOG_DIR=/var/log/zookeeper -export ZOOPIDFILE=/var/run/zookeeper/zookeeper_server.pid -!EOF - mkdir -p /var/lib/zookeeper - chown zookeeper:zookeeper /var/lib/zookeeper - cp /usr/hdp/current/zookeeper-server/etc/init.d/zookeeper-server /etc/init.d/.
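# Register the HDP init script copied above so ZooKeeper starts at boot, then start it now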
- update-rc.d zookeeper-server defaults - service zookeeper-server start -fi -if [ $CLUSTER_INDEX -eq 2 ] -then - debconf-set-selections </usr/hdp/current/spark-client/conf/java-opts -echo "export OPTS=\"\${OPTS} -Dhdp.version=$HDPVER\"" >>/etc/cdap/conf/cdap-env.sh -cat >/etc/profile.d/hadoop.sh <<'!EOF' -HADOOP_PREFIX=/usr/hdp/current/hadoop-client -HADOOP_YARN_HOME=/usr/hdp/current/hadoop-yarn-nodemanager -HADOOP_HOME=/usr/hdp/current/hadoop-client -HADOOP_COMMON_HOME=$HADOOP_HOME -HADOOP_CONF_DIR=/etc/hadoop/conf -HADOOP_HDFS_HOME=/usr/hdp/current/hadoop-hdfs-namenode -HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec -YARN_LOG_DIR=/usr/lib/hadoop-yarn/logs -HADOOP_LOG_DIR=/usr/lib/hadoop/logs -JAVA_HOME=/usr/lib/jvm/default-java -JAVA=$JAVA_HOME/bin/java -PATH=$PATH:$HADOOP_HOME/bin -HBASE_LOG_DIR=/usr/lib/hbase/logs -HADOOP_MAPRED_LOG_DIR=/usr/lib/hadoop-mapreduce/logs -HBASE_CONF_DIR=/etc/hbase/conf -export HADOOP_PREFIX HADOOP_HOME HADOOP_COMMON_HOME HADOOP_CONF_DIR HADOOP_HDFS_HOME JAVA_HOME PATH HADOOP_LIBEXEC_DIR JAVA JARN_LOG_DIR HADOOP_LOG_DIR HBASE_LOG_DIR HADOOP_MAPRED_LOG_DIR HBASE_CONF_DIR -!EOF -chmod 755 /etc/profile.d/hadoop.sh -cat >/etc/hadoop/conf/hadoop-env.sh -mv /root/.sshkey /var/lib/hadoop-hdfs/.ssh -cp /var/lib/hadoop-hdfs/.ssh/id_rsa.pub /var/lib/hadoop-hdfs/.ssh/authorized_keys ->/etc/hadoop/conf/dfs.exclude ->/etc/hadoop/conf/yarn.exclude -chown -R hdfs:hadoop /var/lib/hadoop-hdfs/.ssh /hadoop /usr/lib/hadoop -chown -R yarn:hadoop /usr/lib/hadoop-yarn /hadoop/yarn -chown -R mapred:hadoop /usr/lib/hadoop-mapreduce -chown -R hbase:hbase /usr/lib/hbase -chmod 700 /var/lib/hadoop-hdfs/.ssh -chmod 600 /var/lib/hadoop-hdfs/.ssh/* -sed -i -e '/maxClientCnxns/d' /etc/zookeeper/conf/zoo.cfg - -cat >/tmp/init.py <\n\n" - for n in m.keys(): - a = a + "\n \n {n}\n {v}\n ".format(n=n,v=m[n]) - a = a + "\n\n" - with open(f, 'w') as xml: - xml.write(a) -pxc('/etc/hadoop/conf/core-site.xml', { - 'fs.defaultFS':'hdfs://cl' - }) -pxc('/etc/hadoop/conf/hdfs-site.xml', { - 'dfs.namenode.datanode.registration.ip-hostname-check':'false', - 'dfs.namenode.name.dir':'/hadoop/hdfs/namenode', - 'dfs.hosts.exclude':'/etc/hadoop/conf/dfs.exclude', - 'dfs.datanode.data.dir':'/hadoop/hdfs/data', - 'dfs.journalnode.edits.dir':'/hadoop/hdfs/journalnode', - 'dfs.nameservices':'cl', - 'dfs.ha.namenodes.cl':'nn1,nn2', - 'dfs.namenode.rpc-address.cl.nn1':localips[0]+':8020', - 'dfs.namenode.rpc-address.cl.nn2':localips[1]+':8020', - 'dfs.namenode.http-address.cl.nn1':localips[0]+':50070', - 'dfs.namenode.http-address.cl.nn2':localips[1]+':50070', - 'dfs.namenode.shared.edits.dir':'qjournal://'+localips[0]+':8485;'+localips[1]+':8485;'+localips[2]+':8485/cl', - 'dfs.journalnode.edits.dir':'/hadoop/hdfs/journalnode', - 'dfs.client.failover.proxy.provider.cl':'org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider', - 'dfs.ha.fencing.methods':'sshfence(hdfs),shell(/bin/true)', - 'dfs.ha.fencing.ssh.private-key-files':'/var/lib/hadoop-hdfs/.ssh/id_rsa', - 'dfs.ha.fencing.ssh.connect-timeout':'30000', - 'dfs.ha.automatic-failover.enabled':'true', - 'ha.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181' - }) -pxc('/etc/hadoop/conf/yarn-site.xml', { - 'yarn.nodemanager.vmem-check-enabled':'false', - 
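    # The entries below set the YARN application classpath against the HDP "current" symlinks and enable ResourceManager HA (rm1/rm2) backed by the same ZooKeeper quorum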
'yarn.application.classpath':'/etc/hadoop/conf,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*', - 'yarn.nodemanager.delete.debug-delay-sec':'43200', - 'yarn.scheduler.minimum-allocation-mb':'512', - 'yarn.scheduler.maximum-allocation-mb':'8192', - 'yarn.nodemanager.local-dirs':'/hadoop/yarn/local', - 'yarn.nodemanager.log-dirs':'/hadoop/yarn/log', - 'yarn.resourcemanager.zk-address':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181', - 'yarn.resourcemanager.ha.enabled':'true', - 'yarn.resourcemanager.ha.rm-ids':'rm1,rm2', - 'yarn.resourcemanager.hostname.rm1':localips[1], - 'yarn.resourcemanager.hostname.rm2':localips[2], - 'yarn.resourcemanager.cluster-id':'cl', - 'yarn.resourcemanager.recovery-enabled':'true', - 'yarn.resourcemanager.store.class':'org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore', - 'yarn.resourcemanager.nodes.exclude-path':'/etc/hadoop/conf/yarn.exclude' - }) -pxc('/etc/hadoop/conf/mapred-site.xml', { - 'mapreduce.application.classpath':'/etc/hadoop/conf,/usr/lib/hadoop/lib/*,/usr/lib/hadoop/*,/usr/hdp/current/hadoop-hdfs-namenode/,/usr/hdp/current/hadoop-hdfs-namenode/lib/*,/usr/hdp/current/hadoop-hdfs-namenode/*,/usr/hdp/current/hadoop-yarn-nodemanager/lib/*,/usr/hdp/current/hadoop-yarn-nodemanager/*,/usr/hdp/current/hadoop-mapreduce-historyserver/lib/*,/usr/hdp/current/hadoop-mapreduce-historyserver/*', - 'mapreduce.jobhistory.intermediate-done-dir':'/mr-history/tmp', - 'mapreduce.jobhistory.done-dir':'/mr-history/done', - 'mapreduce.jobhistory.address':localips[1], - 'mapreduce.jobhistory.webapp.address':localips[1] - }) -pxc('/etc/hbase/conf/hbase-site.xml', { - 'hbase.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181', - 'hbase.rootdir':'hdfs://cl/apps/hbase/data', - 'hbase.cluster.distributed':'true' - }) -pxc('/etc/hive/conf/hive-site.xml', { - 'fs.file.impl.disable.cache':'true', - 'fs.hdfs.impl.disable.cache':'true', - 'hadoop.clientside.fs.operations':'true', - 'hive.auto.convert.join.noconditionaltask.size':'1000000000', - 'hive.auto.convert.sortmerge.join.noconditionaltask':'true', - 'hive.auto.convert.sortmerge.join':'true', - 'hive.enforce.bucketing':'true', - 'hive.enforce.sorting':'true', - 'hive.mapjoin.bucket.cache.size':'10000', - 'hive.mapred.reduce.tasks.speculative.execution':'false', - 'hive.metastore.cache.pinobjtypes':'Table,Database,Type,FieldSchema,Order', - 'hive.metastore.client.socket.timeout':'60s', - 'hive.metastore.local':'true', - 'hive.metastore.uris':'thrift://' + fqdns[2] + ':9083', - 'hive.metastore.warehouse.dir':'/apps/hive/warehouse', - 'hive.optimize.bucketmapjoin.sortedmerge':'true', - 'hive.optimize.bucketmapjoin':'true', - 'hive.optimize.mapjoin.mapreduce':'true', - 'hive.optimize.reducededuplication.min.reducer':'1', - 'hive.security.authorization.manager':'org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider', - 'hive.semantic.analyzer.factory.impl':'org.apache.hivealog.cli.HCatSemanticAnalyzerFactory', - 'javax.jdo.option.ConnectionDriverName':'com.mysql.jdbc.Driver', - 'javax.jdo.option.ConnectionPassword': mysqlpw, - 'javax.jdo.option.ConnectionURL':'jdbc:mysql://localhost:3306/metastore?createDatabaseIfNotExist=true', - 'javax.jdo.option.ConnectionUserName':'root' - }) -if myid == 2: - pxc('/etc/cdap/conf/cdap-site.xml', { - 
'zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181/\${root.namespace}', - 'router.server.address':localips[2], - 'explore.enabled':'true', - 'enable.unrecoverable.reset':'true', - 'kafka.seed.brokers':localips[2] + ':9092', - 'app.program.jvm.opts':'-XX:MaxPermSize=128M \${twill.jvm.gc.opts} -Dhdp.version=$HDPVER -Dspark.yarn.am.extraJavaOptions=-Dhdp.version=$HDPVER' - }) -with open('/etc/hbase/conf/regionservers', 'w') as f: - for ip in localips: - f.write('{ip}\n'.format(ip=ip)) -with open('/etc/hbase/conf/hbase-env.sh', 'a') as f: - f.write("export HBASE_MANAGES_ZK=false\n") -with open('/etc/zookeeper/conf/zoo.cfg', 'a') as f: - f.write("server.1={L1}:2888:3888\nserver.2={L2}:2888:3888\nserver.3={L3}:2888:3888\nmaxClientCnxns=0\nautopurge.purgeInterval=6\n".format(L1=localips[0],L2=localips[1],L3=localips[2])) -with open('/etc/clustermembers', 'w') as f: - f.write("export me={me}\n".format(me=myid)) - for idx in range(len(localips)): - f.write("export n{i}={ip}\n".format(i=idx, ip=localips[idx])) - f.write("export N{i}={ip}\n".format(i=idx, ip=floatingips[idx])) -with open('/etc/hadoop/conf/slaves', 'w') as f: - for idx in range(len(localips)): - if idx != myid: - f.write("{x}\n".format(x=localips[idx])) -if myid < 3: - with open('/var/lib/zookeeper/myid', 'w') as f: - f.write("{id}".format(id=(myid + 1))) - os.system('service zookeeper-server restart') -for ip in localips: - os.system("su - hdfs -c \"ssh -o StrictHostKeyChecking=no -o NumberOfPasswordPrompts=0 {ip} echo Connectivity to {ip} verified\"".format(ip=ip)) -!EOF - -python /tmp/init.py - -. /etc/clustermembers -waitfor() { - while ( ! nc $1 $2 >/var/log/hive/hive.out 2>>/var/log/hive/hive.log /tmp/cinst.sh - bash /tmp/cinst.sh <>/etc/clustermembers -fi diff --git a/cloud_init/instconsulagentub16.sh b/cloud_init/instconsulagentub16.sh deleted file mode 100644 index 87c9f92..0000000 --- a/cloud_init/instconsulagentub16.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash -# ============LICENSE_START==================================================== -# org.onap.dcae -# ============================================================================= -# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved. -# ============================================================================= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============LICENSE_END====================================================== - -CONSULVER=0.8.3 -CONSULNAME=consul_${CONSULVER}_linux_amd64 -CB=/opt/consul/bin -CD=/opt/consul/data -CF=/opt/consul/config -mkdir -p $CB $CD $CF -cat >$CF/consul.json -cd $CB -wget https://releases.hashicorp.com/consul/${CONSULVER}/${CONSULNAME}.zip -unzip ${CONSULNAME}.zip -rm ${CONSULNAME}.zip -mv consul ${CONSULNAME} -ln -s ${CONSULNAME} consul -cat < /lib/systemd/system/consul.service -[Unit] -Description=Consul -Requires=network-online.target -After=network.target -[Service] -Type=simple -ExecStart=/opt/consul/bin/consul agent -config-dir=/opt/consul/config -ExecReload=/bin/kill -HUP \$MAINPID -[Install] -WantedBy=multi-user.target -EOF -systemctl enable consul -systemctl start consul -until /opt/consul/bin/consul join "dcae-cnsl" -do - echo Waiting to join Consul cluster - sleep 60 -done diff --git a/cloud_init/pom.xml b/cloud_init/pom.xml deleted file mode 100644 index 7eb0513..0000000 --- a/cloud_init/pom.xml +++ /dev/null @@ -1,173 +0,0 @@ - - - - 4.0.0 - - org.onap.dcaegen2.deployments - deployments - 1.2.0-SNAPSHOT - - org.onap.dcaegen2.deployments - cloud_init - dcaegen2-deployments-cloud_init - 1.1.0-SNAPSHOT - http://maven.apache.org - - UTF-8 - true - . - - - - - py - Python - **/*.py - - - - - ${project.artifactId}-${project.version} - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - clean phase script - clean - - exec - - - - ${project.artifactId} - clean - - - - - generate-sources script - generate-sources - - exec - - - - ${project.artifactId} - generate-sources - - - - - compile script - compile - - exec - - - - ${project.artifactId} - compile - - - - - package script - package - - exec - - - - ${project.artifactId} - package - - - - - test script - test - - exec - - - - ${project.artifactId} - test - - - - - install script - install - - exec - - - - ${project.artifactId} - install - - - - - deploy script - deploy - - exec - - - - ${project.artifactId} - deploy - - - - - - - - diff --git a/cm-container/Dockerfile b/cm-container/Dockerfile new file mode 100644 index 0000000..7415c55 --- /dev/null +++ b/cm-container/Dockerfile @@ -0,0 +1,50 @@ +# ============LICENSE_START======================================================= +# org.onap.dcae +# ================================================================================ +# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END========================================================= +# +# ECOMP is a trademark and service mark of AT&T Intellectual Property. 
+FROM cloudifyplatform/community:18.7.23 +MAINTAINER maintainer + +ENV TYPE_REPO https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R3 +ENV CCSDK_REPO https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins + +# Store type files locally +RUN mkdir scripts +COPY get-type-files.sh dcae-cleanup.sh scripts/ +# Load our type files and the Cloudify 3.4 type files +RUN scripts/get-type-files.sh ${TYPE_REPO} ${CCSDK_REPO}\ + && mkdir /opt/manager/resources/spec/cloudify/3.4\ + && curl -Ss https://cloudify.co/spec/cloudify/3.4/types.yaml > /opt/manager/resources/spec/cloudify/3.4/types.yaml\ + && chown -R cfyuser:cfyuser /opt/manager/resources/spec/cloudify/3.4\ + && chmod +x scripts/*.sh +# Create mount point for CM config file +RUN mkdir -p /opt/onap && chown cfyuser:cfyuser /opt/onap + +# For HEAT environment, install software needed to use Cloudify CLI 4.2 to install plugins & deploy blueprints locally +# Install python development-related packages +RUN yum install -y gcc python-devel python-virtualenv python-pip + +# Install jq (used for cleanup--parsing output of CM API call) +RUN curl -Ss -L "https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64" > /bin/jq \ +&& chmod +x /bin/jq + +# Set up virtualenv and install Cloudify CLI 4.2 +RUN pip install --upgrade pip==9.0.3 \ + && virtualenv cfy42 \ + && source cfy42/bin/activate \ + && pip install cloudify==4.2 diff --git a/cm-container/scripts/get-type-files.sh b/cm-container/scripts/get-type-files.sh index c931467..8a31d98 100755 --- a/cm-container/scripts/get-type-files.sh +++ b/cm-container/scripts/get-type-files.sh @@ -40,6 +40,7 @@ CCSDKTYPEFILES=\ /type_files/pgaas/1.1.0/pgaas_types.yaml \ /type_files/sshkeyshare/sshkey_types.yaml \ /type_files/helm/4.0.0/helm-type.yaml \ +/type_files/dmaap/dmaap.yaml \ " mkdir ${DEST} @@ -72,4 +73,4 @@ chown cfyuser:cfyuser /etc/cloudify/config.yaml # Need to provide the additional rules in a file that can be # used at deployment time to update the resolver rules echo "- ${TYPE_RULE0}" > ${EXTRA_RULES} -echo "- ${TYPE_RULE1}" >> ${EXTRA_RULES} \ No newline at end of file +echo "- ${TYPE_RULE1}" >> ${EXTRA_RULES} diff --git a/healthcheck-container/healthcheck.js b/healthcheck-container/healthcheck.js index c4f2dd7..33bb848 100644 --- a/healthcheck-container/healthcheck.js +++ b/healthcheck-container/healthcheck.js @@ -1,5 +1,5 @@ /* -Copyright(c) 2018 AT&T Intellectual Property. All rights reserved. +Copyright(c) 2018-2019 AT&T Intellectual Property. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -41,7 +41,7 @@ const bootDeps = 'dep-dcae-tca-analytics', 'dep-dcae-prh', 'dep-dcae-hv-ves-collector', - 'dep-dcae-datafile-collector' + 'dep-dcae-dashboard' ]; const status = require('./get-status'); diff --git a/heat/docker-compose-1.yaml b/heat/docker-compose-1.yaml deleted file mode 100644 index 3041d6c..0000000 --- a/heat/docker-compose-1.yaml +++ /dev/null @@ -1,82 +0,0 @@ -version: '2.1' -services: - pgHolmes: - image: "postgres:9.5" - container_name: "pgHolmes" - restart: "always" - hostname: "phHolmes" - environment: - - "POSTGRES_USER=holmes" - - "POSTGRES_PASSWORD=holmespwd" - ports: - - "5432:5432" - labels: - - "SERVICE_5432_NAME=pgHolmes" - - "SERVICE_5432_CHECK_TCP=true" - - "SERVICE_5432_CHECK_INTERVAL=15s" - - "SERVICE_5432_CHECK_INITIAL_STATUS=passing" - - pgInventory: - image: "postgres:9.5" - container_name: "pgInventory" - restart: "always" - hostname: "pgInventory" - environment: - - "POSTGRES_USER=inventory" - - "POSTGRES_PASSWORD=inventorypwd" - ports: - - "5433:5432" - labels: - - "SERVICE_5432_NAME=pgInventory" - - "SERVICE_5432_CHECK_TCP=true" - - "SERVICE_5432_CHECK_INTERVAL=15s" - - "SERVICE_5432_CHECK_INITIAL_STATUS=passing" - - - consul: - image: "consul:0.8.3" - container_name: "consul" - privileged: true - restart: "always" - hostname: "consul" - ports: - - "8500:8500" - - "53:8600/udp" - - "53:8600/tcp" - environment: - - "DOCKER_HOST=tcp://{{ dcae_ip_addr }}:2376" - command: "agent -ui -server -bootstrap-expect 1 -client 0.0.0.0 -log-level trace -recursor {{ dns_ip_addr }}" - labels: - - "SERVICE_8500_NAME=consul" - - "SERVICE_8500_CHECK_HTTP=/v1/agent/services" - - "SERVICE_8500_CHECK_INTERVAL=15s" - - "SERVICE_8500_CHECK_INITIAL_STATUS=passing" - - - config-binding-service: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.configbinding.app-app:{{ dcae_docker_cbs }}" - container_name: "config_binding_service" - restart: "always" - hostname: "config-binding-service" - environment: - - "CONSUL_HOST=consul" - ports: - - "10000:10000" - depends_on: - - "consul" - - "tls-init" - labels: - - "SERVICE_10000_NAME=config_binding_service" - - "SERVICE_10000_CHECK_HTTP=/healthcheck" - - "SERVICE_10000_CHECK_INTERVAL=15s" - - "SERVICE_10000_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - - tls-init: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.tls-init-container:{{ dcae_docker_tls }}" - container_name: "tls-init" - hostname: "tls-init" - volumes: - - "./tls/shared:/opt/tls/shared" diff --git a/heat/docker-compose-2.yaml b/heat/docker-compose-2.yaml deleted file mode 100644 index dca210e..0000000 --- a/heat/docker-compose-2.yaml +++ /dev/null @@ -1,99 +0,0 @@ -version: '2.1' -services: - - mvp-dcaegen2-collectors-ves: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.ves.vescollector:{{ dcae_docker_ves }}" - container_name: "mvp-dcaegen2-collectors-ves" - restart: "always" - hostname: "mvp-dcaegen2-collectors-ves" - environment: - - "DMAAPHOST={{ mr_ip_addr }}" - - "CONSUL_HOST=consul" - - "CONSUL_PORT=8500" - - "CONFIG_BINDING_SERVICE=config_binding_service" - - "SERVICE_NAME=mvp-dcaegen2-collectors-ves" - - "HOSTNAME=mvp-dcaegen2-collectors-ves" - ports: - - "8081:8080" - labels: - - "SERVICE_8080_NAME=mvp-dcaegen2-collectors-ves" - - "SERVICE_8080_CHECK_HTTP=/healthcheck" - - "SERVICE_8080_CHECK_INTERVAL=15s" - - "SERVICE_8080_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - - mvp-dcaegen2-analytics-tca: - image: "{{ nexus_docker_repo 
}}/onap/org.onap.dcaegen2.deployments.tca-cdap-container:{{ dcae_docker_tca }}" - container_name: "mvp-dcaegen2-analytics-tca" - restart: "always" - hostname: "mvp-dcaegen2-analytics-tca" - environment: - - "DMAAPHOST={{ mr_ip_addr }}" - - "DMAAPPORT=3904" - - "DMAAPPUBTOPIC=unauthenticated.DCAE_CL_OUTPUT" - - "DMAAPSUBTOPIC=unauthenticated.VES_MEASUREMENT_OUTPUT" - - "AAIHOST={{ aai1_ip_addr }}" - - "AAIPORT=8443" - - "CONSUL_HOST=consul" - - "CONSUL_PORT=8500" - - "CBS_HOST=config-binding-service" - - "CBS_PORT=10000" - - "SERVICE_NAME=mvp-dcaegen2-analytics-tca" - - "HOSTNAME=mvp-dcaegen2-analytics-tca" - - "CONFIG_BINDING_SERVICE=config_binding_service" - # set the parameter below to enable REDIS caching. - #- REDISHOSTPORT=redis-cluster:6379 - ports: - - "11011:11011" - #- "11015:11015" - labels: - - "SERVICE_11011_NAME=mvp-dcaegen2-analytics-tca" - - "SERVICE_11011_CHECK_HTTP=/cdap/ns/cdap_tca_hi_lo" - - "SERVICE_11011_CHECK_INTERVAL=15s" - - "SERVICE_11011_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - mvp-dcaegen2-analytics-holmes-engine-management: - image: "{{ nexus_docker_repo }}/onap/holmes/engine-management:{{ holmes_docker_em }}" - container_name: "mvp-dcaegen2-analytics-holmes-engine-management" - restart: "always" - hostname: "mvp-dcaegen2-analytics-holmes-engine-management" - environment: - - "URL_JDBC=pgHolmes:5432" - - "JDBC_USERNAME=holmes" - - "JDBC_PASSWORD=holmespwd" - - "MSB_ADDR={{ msb_ip_addr }}" - - "CONSUL_HOST=consul" - - "CONSUL_PORT=8500" - - "CONFIG_BINDING_SERVICE=config_binding_service" - - "HOSTNAME=mvp-dcaegen2-analytics-holmes-engine-management" - ports: - - "9102:9102" - labels: - - "SERVICE_9102_IGNORE=true" - volumes: - - "./tls/shared:/opt/tls/shared" - - mvp-dcaegen2-analytics-holmes-rule-management: - image: "{{ nexus_docker_repo }}/onap/holmes/rule-management:{{ holmes_docker_rm }}" - container_name: "mvp-dcaegen2-analytics-holmes-rule-management" - restart: "always" - hostname: "mvp-dcaegen2-analytics-holmes-rule-management" - environment: - - "URL_JDBC=pgHolmes:5432" - - "JDBC_USERNAME=holmes" - - "JDBC_PASSWORD=holmespwd" - - "MSB_ADDR={{ msb_ip_addr }}" - - "CONSUL_HOST=consul" - - "CONSUL_PORT=8500" - - "CONFIG_BINDING_SERVICE=config_binding_service" - - "HOSTNAME=mvp-dcaegen2-analytics-holmes-rule-management" - ports: - - "9101:9101" - labels: - - "SERVICE_9101_IGNORE=true" - volumes: - - "./tls/shared:/opt/tls/shared" diff --git a/heat/docker-compose-3.yaml b/heat/docker-compose-3.yaml deleted file mode 100644 index 27dbb38..0000000 --- a/heat/docker-compose-3.yaml +++ /dev/null @@ -1,70 +0,0 @@ -version: '2.1' -services: - - inventory: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.inventory-api:{{ dcae_docker_inv }}" - restart: "always" - container_name: "inventory" - hostname: "inventory" - environment: - - "POSTGRES_USER=inventory" - - "POSTGRES_PASSWORD=inventorypwd" - ports: - - "8080:8080" - labels: - - "SERVICE_8080_NAME=inventory" - - "SERVICE_8080_CHECK_HTTP=/dcae-service-types" - - "SERVICE_8080_CHECK_INTERVAL=15s" - - "SERVICE_8080_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - - service-change-handler: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.servicechange-handler:{{ dcae_docker_sch }}" - container_name: "service-change-handler" - restart: "always" - hostname: "service-change-handler" - ports: - - "8079:8079" - environment: - - "POSTGRES_USER=inventory" - - "POSTGRES_PASSWORD=inventorypwd" - labels: - - 
"SERVICE_NAME=service_change_handler" - - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/health.sh" - - "SERVICE_CHECK_INTERVAL=15s" - - "SERVICE_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - - deployment_handler: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.deployment-handler:{{ dcae_docker_dh }}" - container_name: "deployment-handler" - restart: "always" - hostname: "deployment-handler" - environment: - - "CLOUDIFY_PASSWORD=admin" - - "CLOUDIFY_USER=admin" - ports: - - "8188:8443" - volumes: - - "./tls/shared:/opt/app/dh/etc/cert/" - - - policy_handler: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.policy-handler:{{ dcae_docker_ph }}" - container_name: "policy-handler" - restart: "always" - hostname: "policy-handler" - ports: - - "25577:25577" - labels: - - "SERVICE_25577_NAME=policy_handler" - - "SERVICE_25577_CHECK_HTTP=/healthcheck" - - "SERVICE_25577_CHECK_INTERVAL=15s" - - "SERVICE_25577_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/app/policy_handler/etc/tls/certs/" - diff --git a/heat/docker-compose-4.yaml b/heat/docker-compose-4.yaml deleted file mode 100644 index c13562d..0000000 --- a/heat/docker-compose-4.yaml +++ /dev/null @@ -1,167 +0,0 @@ -version: '2.1' -services: - snmptrap: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.snmptrap:{{ dcae_docker_snmptrap }}" - container_name: "static-dcaegen2-collectors-snmptrap" - restart: "always" - hostname: "static-dcaegen2-collectors-snmptrap" - environment: - - "DMAAPHOST={{ mr_ip_addr }}" - - "CONSUL_HOST=consul" - - "CONSUL_PORT=8500" - - "CONFIG_BINDING_SERVICE=config_binding_service" - - "SERVICE_NAME=static-dcaegen2-collectors-snmptrap" - - "HOSTNAME=static-dcaegen2-collectors-snmptrap" - - "HOSTALIASES=/etc/host.aliases" - ports: - - "162:6162/udp" - labels: - - "SERVICE_NAME=static-dcaegen2-collectors-snmptrap" - - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/snmptrap/bin/snmptrapd.sh status" - - "SERVICE_CHECK_INTERVAL=300s" - - "SERVICE_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - - prh: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.prh.prh-app-server:{{ dcae_docker_prh }}" - container_name: "static-dcaegen2-services-prh" - restart: "always" - hostname: "static-dcaegen2-services-prh" - environment: - - "DMAAPHOST={{ mr_ip_addr }}" - - "CONSUL_HOST=consul" - - "CONSUL_PORT=8500" - - "CONFIG_BINDING_SERVICE=config_binding_service" - - "SERVICE_NAME=static-dcaegen2-services-prh" - - "HOSTNAME=static-dcaegen2-services-prh" - - "HOSTALIASES=/etc/host.aliases" - ports: - - "8082:8080" - labels: - - "SERVICE_8082_NAME=static-dcaegen2-services-prh" - - "SERVICE_8082_CHECK_HTTP=/heartbeat" - - "SERVICE_8082_CHECK_INTERVAL=15s" - - "SERVICE_8082_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - - hvves: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:{{ dcae_docker_hvves }}" - container_name: "static-dcaegen2-collectors-hvves" - restart: "always" - hostname: "static-dcaegen2-collectors-hvves" - environment: - - "DMAAPHOST={{ mr_ip_addr }}" - - "CONSUL_HOST=consul" - - "CONSUL_PORT=8500" - - "CONFIG_BINDING_SERVICE=config_binding_service" - - "SERVICE_NAME=static-dcaegen2-collectors-hvves" - - "HOSTNAME=static-dcaegen2-collectors-hvves" - - "HOSTALIASES=/etc/host.aliases" - ports: - - "6061:6061" - labels: - - "SERVICE_NAME=static-dcaegen2-collectors-hvves" - - 
"SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/hvves/bin/healthcheck.sh" - - "SERVICE_CHECK_INTERVAL=15s" - - "SERVICE_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - - datafile: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.datafile.datafile-app-server:{{ dcae_docker_datafile }}" - container_name: "static-dcaegen2-collectors-datafile" - restart: "always" - hostname: "static-dcaegen2-collectors-datafile" - environment: - - "DMAAPHOST={{ mr_ip_addr }}" - - "CONSUL_HOST=consul" - - "CONSUL_PORT=8500" - - "CONFIG_BINDING_SERVICE=config_binding_service" - - "SERVICE_NAME=static-dcaegen2-collectors-datafile" - - "HOSTNAME=static-dcaegen2-collectors-datafile" - - "HOSTALIASES=/etc/host.aliases" - labels: - - "SERVICE_NAME=static-dcaegen2-collectors-datafile" - - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh" - - "SERVICE_CHECK_INTERVAL=15s" - - "SERVICE_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - mapper-universalvesadaptor: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.universalvesadaptor:{{ dcae_docker_mua }}" - container_name: "static-dcaegen2-services-mua" - restart: "always" - hostname: "static-dcaegen2-services-mua" - environment: - - "DMAAPHOST={{ mr_ip_addr }}" - - "CONSUL_HOST=consul" - - "CONSUL_PORT=8500" - - "CONFIG_BINDING_SERVICE=config_binding_service" - - "SERVICE_NAME=static-dcaegen2-services-mua" - - "HOSTNAME=static-dcaegen2-services-mua" - - "HOSTALIASES=/etc/host.aliases" - - "MR_DEFAULT_PORT_NUMBER=3904" - - "URL_JDBC=jdbc:postgresql://{{dcae_ip_addr}}:5433/inventory" - - "JDBC_USERNAME=inventory" - - "JDBC_PASSWORD=inventorypwd" - labels: - - "SERVICE_NAME=static-dcaegen2-services-mua" - - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh" - - "SERVICE_CHECK_INTERVAL=15s" - - "SERVICE_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - mapper-snmp: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.snmpmapper:{{ dcae_docker_msnmp }}" - container_name: "static-dcaegen2-services-msnmp" - restart: "always" - hostname: "static-dcaegen2-services-msnmp" - environment: - - "DMAAPHOST={{ mr_ip_addr }}" - - "CONSUL_HOST=consul" - - "CONSUL_PORT=8500" - - "CONFIG_BINDING_SERVICE=config_binding_service" - - "SERVICE_NAME=static-dcaegen2-services-msnmp" - - "HOSTNAME=static-dcaegen2-services-msnmp" - - "HOSTALIASES=/etc/host.aliases" - - "URL_JDBC=jdbc:postgresql://{{dcae_ip_addr}}:5433/inventory" - - "JDBC_USERNAME=inventory" - - "JDBC_PASSWORD=inventorypwd" - labels: - - "SERVICE_NAME=static-dcaegen2-services-msnmp" - - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh" - - "SERVICE_CHECK_INTERVAL=15s" - - "SERVICE_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - - heartbeat: - image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.heartbeat:{{ dcae_docker_heartbeat }}" - container_name: "static-dcaegen2-services-heartbeat" - restart: "always" - hostname: "static-dcaegen2-services-heartbeat" - environment: - - "DMAAPHOST={{ mr_ip_addr }}" - - "CONSUL_HOST=consul" - - "CONSUL_PORT=8500" - - "CONFIG_BINDING_SERVICE=config_binding_service" - - "SERVICE_NAME=static-dcaegen2-services-heartbeat" - - "HOSTNAME=static-dcaegen2-services-heartbeat" - - "HOSTALIASES=/etc/host.aliases" - labels: - - "SERVICE_NAME=static-dcaegen2-services-heartbeat" - - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh" - - 
"SERVICE_CHECK_INTERVAL=15s" - - "SERVICE_CHECK_INITIAL_STATUS=passing" - volumes: - - "./tls/shared:/opt/tls/shared" - - diff --git a/heat/pom.xml b/heat/pom.xml deleted file mode 100644 index e21db72..0000000 --- a/heat/pom.xml +++ /dev/null @@ -1,158 +0,0 @@ - - - - 4.0.0 - - org.onap.dcaegen2.deployments - deployments - 1.2.0-SNAPSHOT - - org.onap.dcaegen2.deployments - heat - dcaegen2-deployments-heat - 1.0.0-SNAPSHOT - http://maven.apache.org - - UTF-8 - true - - - ${project.artifactId}-${project.version} - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - clean phase script - clean - - exec - - - - ${project.artifactId} - clean - - - - - generate-sources script - generate-sources - - exec - - - - ${project.artifactId} - generate-sources - - - - - compile script - compile - - exec - - - - ${project.artifactId} - compile - - - - - package script - package - - exec - - - - ${project.artifactId} - package - - - - - test script - test - - exec - - - - ${project.artifactId} - test - - - - - install script - install - - exec - - - - ${project.artifactId} - install - - - - - deploy script - deploy - - exec - - - - ${project.artifactId} - deploy - - - - - - - - diff --git a/heat/pullall.sh b/heat/pullall.sh deleted file mode 100755 index 42ee1ad..0000000 --- a/heat/pullall.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -############################################################################# -# -# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -############################################################################# - -docker login {{ nexus_docker_repo }} -u {{ nexus_username }} -p {{ nexus_password }} - -docker pull postgres:9.5 -docker pull consul:0.8.3 -docker pull nginx:latest -docker pull onapdcae/registrator:v7 -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.configbinding.app-app:{{ dcae_docker_cbs }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.ves.vescollector:{{ dcae_docker_ves }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.tca-cdap-container:{{ dcae_docker_tca }} -docker pull {{ nexus_docker_repo }}/onap/holmes/engine-management:{{ holmes_docker_em }} -docker pull {{ nexus_docker_repo }}/onap/holmes/rule-management:{{ holmes_docker_rm }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.inventory-api:{{ dcae_docker_inv }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.servicechange-handler:{{ dcae_docker_sch }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.deployment-handler:{{ dcae_docker_dh }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.policy-handler:{{ dcae_docker_ph }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.snmptrap:{{ dcae_docker_snmptrap }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.prh.prh-app-server:{{ dcae_docker_prh }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:{{ dcae_docker_hvves }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.datafile.datafile-app-server:{{ dcae_docker_datafile }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.universalvesadaptor:{{ dcae_docker_mua }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.snmpmapper:{{ dcae_docker_msnmp }} -docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.heartbeat:{{ dcae_docker_heartbeat }} diff --git a/heat/register.sh b/heat/register.sh deleted file mode 100755 index 34c1505..0000000 --- a/heat/register.sh +++ /dev/null @@ -1,605 +0,0 @@ -#!/bin/bash - -############################################################################# -# -# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -############################################################################# - - - -# We now register services that are not handled by Registrator -# minimum platform components -HOSTNAME_CONSUL="consul" -SRVCNAME_CONSUL="consul" -HOSTNAME_CM="cloudify-manager" -SRVCNAME_CM="cloudify_manager" -HOSTNAME_CBS="config-binding-service" -SRVCNAME_CBS="config_binding_service" - -# R3 MVP service components -HOSTNAME_MVP_VES="mvp-dcaegen2-collectors-ves" -SRVCNAME_MVP_VES="mvp-dcaegen2-collectors-ves" -HOSTNAME_MVP_TCA="mvp-dcaegen2-analytics-tca" -SRVCNAME_MVP_TCA="mvp-dcaegen2-analytics-tca" -HOSTNAME_MVP_HR="mvp-dcaegen2-analytics-holmes-rule-management" -SRVCNAME_MVP_HR="mvp-dcaegen2-analytics-holmes-rule-management" -HOSTNAME_MVP_HE="mvp-dcaegen2-analytics-holmes-engine-management" -SRVCNAME_MVP_HE="mvp-dcaegen2-analytics-holmes-engine-management" - -# R3 PLUS service components -HOSTNAME_STATIC_SNMPTRAP="static-dcaegen2-collectors-snmptrap" -SRVCNAME_STATIC_SNMPTRAP="static-dcaegen2-collectors-snmptrap" -HOSTNAME_STATIC_MAPPER="static-dcaegen2-services-mapper" -SRVCNAME_STATIC_MAPPER="static-dcaegen2-services-mapper" -HOSTNAME_STATIC_HEARTBEAT="static-dcaegen2-services-heartbeat" -SRVCNAME_STATIC_HEARTBEAT="static-dcaegen2-services-heartbeat" -HOSTNAME_STATIC_PRH="static-dcaegen2-services-prh" -SRVCNAME_STATIC_PRH="static-dcaegen2-services-prh" -HOSTNAME_STATIC_HVVES="static-dcaegen2-collectors-hvves" -SRVCNAME_STATIC_HVVES="static-dcaegen2-collectors-hvves" -HOSTNAME_STATIC_DFC="static-dcaegen2-collectors-datafile" -SRVCNAME_STATIC_DFC="static-dcaegen2-collectors-datafile" - - -# registering docker host -SVC_NAME="dockerhost" -SVC_IP="$(cat /opt/config/dcae_float_ip.txt)" -REGREQ=" -{ - \"Name\" : \"${SVC_NAME}\", - \"ID\" : \"${SVC_NAME}\", - \"Address\": \"${SVC_IP}\", - \"Port\": 2376, - \"Check\" : { - \"Name\" : \"${SVC_NAME}_health\", - \"Interval\" : \"15s\", - \"HTTP\" : \"http://${SVC_IP}:2376/containers/registrator/json\", - \"Status\" : \"passing\" - } -} -" -curl -v -X PUT -H 'Content-Type: application/json' \ ---data-binary "$REGREQ" \ -"http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register" - -#Add KV for dockerplugin login -REGREQ=" -[ - { - \"username\": \"docker\", - \"password\": \"docker\", - \"registry\": \"nexus3.onap.org:10001\" - } -] -" -curl -v -X PUT -H 'Content-Type: application/json' \ ---data-binary "$REGREQ" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/docker_plugin/docker_logins" - - -# registering deployment handler -SVC_NAME="deployment_handler" -SVC_IP="$(cat /opt/config/dcae_ip_addr.txt)" -REGREQ=" -{ - \"Name\" : \"${SVC_NAME}\", - \"ID\" : \"${SVC_NAME}\", - \"Address\": \"${SVC_IP}\", - \"Port\": 8188, - \"Check\" : { - \"Name\" : \"${SVC_NAME}_health\", - \"Interval\" : \"15s\", - \"HTTP\" : \"https://${SVC_IP}:8188/\", - \"tls_skip_verify\": true, - \"Status\" : \"passing\" - } -} -" -curl -v -X PUT -H 'Content-Type: application/json' \ ---data-binary \ -"$REGREQ" "http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register" - - -# registering Holmes services -SVC_NAME="${SRVCNAME_MVP_HR}" -SVC_IP="$(cat /opt/config/dcae_ip_addr.txt)" -REGREQ=" -{ - \"Name\" : \"${SVC_NAME}\", - \"ID\" : \"${SVC_NAME}\", - \"Address\": \"${SVC_IP}\", - \"Port\": 9101, - \"Check\" : { - \"Name\" : \"${SVC_NAME}_health\", - \"Interval\" : \"15s\", - \"HTTP\" : \"https://${SVC_IP}:9101/api/holmes-rule-mgmt/v1/healthcheck\", - \"tls_skip_verify\": true, - \"Status\" : \"passing\" - } -} -" -curl -v -X PUT -H 'Content-Type: application/json' \ ---data-binary \ -"$REGREQ" 
"http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register" - - -SVC_NAME="${SRVCNAME_MVP_HE}" -SVC_IP="$(cat /opt/config/dcae_ip_addr.txt)" -REGREQ=" -{ - \"Name\" : \"${SVC_NAME}\", - \"ID\" : \"${SVC_NAME}\", - \"Address\": \"${SVC_IP}\", - \"Port\": 9102, - \"Check\" : { - \"Name\" : \"${SVC_NAME}_health\", - \"Interval\" : \"15s\", - \"HTTP\" : \"https://${SVC_IP}:9102/api/holmes-engine-mgmt/v1/healthcheck\", - \"tls_skip_verify\": true, - \"Status\" : \"passing\" - } -} -" -curl -v -X PUT -H 'Content-Type: application/json' \ ---data-binary "$REGREQ" \ -"http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register" - - - -# now push KVs -# generated with https://www.browserling.com/tools/json-escape -# config binding service -REGKV=" -{} -" -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -http://${HOSTNAME_CONSUL}:8500/v1/kv/config_binding_service -# checked - - - -# inventory -REGKV=' -{ - "database": { - "checkConnectionWhileIdle": false, - "driverClass": "org.postgresql.Driver", - "evictionInterval": "10s", - "initialSize": 2, - "maxSize": 8, - "maxWaitForConnection": "1s", - "minIdleTime": "1 minute", - "minSize": 2, - "password": "inventorypwd", - "properties": { - "charSet": "UTF-8"}, - "url": "jdbc:postgresql://pgInventory:5432/postgres", - "user": "inventory", - "validationQuery": "/* MyService Health Check */ SELECT 1" - }, - "databusControllerConnection": { - "host": "databus-controller-hostname", - "mechId": null, - "password": null, - "port": 8443, - "required": false}, - "httpClient": { - "connectionTimeout": "5000milliseconds", - "gzipEnabled": false, - "gzipEnabledForRequests": false, - "maxThreads": 128, - "minThreads": 1, - "timeout": "5000milliseconds" - } - } - } -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -http://${HOSTNAME_CONSUL}:8500/v1/kv/inventory -# checked - - -# policy handler -REGKV=' -{ - "policy_handler": { - "deploy_handler": { - "target_entity": "deployment_handler", - "tls_ca_mode": "do_not_verify", - "max_msg_length_mb": 5, - "url" : "https://{{ dcae_ip_addr }}:8188", - "tls_ca_mode" : "cert_directory", - "query": { - "cfy_tenant_name": "default_tenant" - } - }, - "thread_pool_size": 4, - "policy_retry_count": 5, - "pool_connections": 20, - "policy_retry_sleep": 5, - "catch_up": { - "interval": 1200 - }, - "reconfigure": { - "interval": 600 - }, - "policy_engine": { - "path_decision": "/decision/v1", - "path_api": "/pdp/api/", - "path_notifications" : "/pdp/notifications", - "tls_ca_mode" : "cert_directory", - "tls_wss_ca_mode" : "cert_directory", - "headers": { - "Environment": "TEST", - "ClientAuth": "cHl0aG9uOnRlc3Q=", - "Content-Type": "application/json", - "Accept": "application/json", - "Authorization": "Basic dGVzdHBkcDphbHBoYTEyMw==" - }, - "url": "https://{{ policy_ip_addr }}:8081", - "target_entity": "policy_engine" - } - } -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/policy_handler" - - -# service change handler -REGKV=' -{ - "asdcDistributionClient": { - "asdcAddress": "{{ sdc_ip_addr }}:8443", - "asdcUri": "https://{{ sdc_ip_addr }}:8443", - "msgBusAddress": "{{ mr_ip_addr }}", - "user": "dcae", - "password": "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U", - "pollingInterval": 20, - "pollingTimeout": 20, - "consumerGroup": "dcae", - "consumerId": "dcae-sch", - "environmentName": "AUTO", - "keyStorePath": null, - "keyStorePassword": null, - "activateServerTLSAuth": false, - "useHttpsWithDmaap": false, - 
"isFilterInEmptyResources": false - }, - "dcaeInventoryClient": { - "uri": "http://inventory:8080" - } -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/service-change-handler" - - -# deployment handler -REGKV=' -{ - "logLevel": "DEBUG", - "cloudify": { - "protocol": "http" - }, - "inventory": { - "protocol": "http" - } -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/deployment_handler" - - -# ves -MR_IP="$(cat /opt/config/mr_ip_addr.txt)" -REGKV=' -{ - "event.transform.flag": "0", - "tomcat.maxthreads": "200", - "collector.schema.checkflag": "1", - "collector.dmaap.streamid": "fault=ves_fault|syslog=ves_syslog|heartbeat=ves_heartbeat|measurementsForVfScaling=ves_measurement|mobileFlow=ves_mobileflow|other=ves_other|stateChange=ves_statechange|thresholdCrossingAlert=ves_thresholdCrossingAlert|voiceQuality=ves_voicequality|sipSignaling=ves_sipsignaling", - "collector.service.port": "8080", - "collector.schema.file": "{\"v1\":\"./etc/CommonEventFormat_27.2.json\",\"v2\":\"./etc/CommonEventFormat_27.2.json\",\"v3\":\"./etc/CommonEventFormat_27.2.json\",\"v4\":\"./etc/CommonEventFormat_27.2.json\",\"v5\":\"./etc/CommonEventFormat_28.4.1.json\"}", - "collector.keystore.passwordfile": "/opt/app/VESCollector/etc/passwordfile", - "collector.inputQueue.maxPending": "8096", - "streams_publishes": { - "ves_measurement": { - "type": "message_router", - "dmaap_info": { - "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.VES_MEASUREMENT_OUTPUT/" - } - }, - "ves_fault": { - "type": "message_router", - "dmaap_info": { - "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.SEC_FAULT_OUTPUT/" - } - } - }, - "collector.service.secure.port": "8443", - "header.authflag": "0", - "collector.keystore.file.location": "/opt/app/VESCollector/etc/keystore", - "collector.keystore.alias": "dynamically generated", - "services_calls": [], - "header.authlist": "userid1,base64encodepwd1|userid2,base64encodepwd2" -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcaegen2-collectors-ves" - - -# holmes rule management -MSB_IP="$(cat /opt/config/msb_ip_addr.txt)" -REGKV=" -{ - \"streams_subscribes\": {}, - \"msb.hostname\": \"${MSB_IP_ADDR}\", - \"msb.uri\": \"/api/microservices/v1/services\", - \"streams_publishes\": {}, - \"holmes.default.rule.volte.scenario1\": \"ControlLoop-VOLTE-2179b738-fd36-4843-a71a-a8c24c70c55b\$\$\$package org.onap.holmes.droolsRule;\\n\\nimport org.onap.holmes.common.dmaap.DmaapService;\\nimport org.onap.holmes.common.api.stat.VesAlarm;\\nimport org.onap.holmes.common.aai.CorrelationUtil;\\nimport org.onap.holmes.common.dmaap.entity.PolicyMsg;\\nimport org.onap.holmes.common.dropwizard.ioc.utils.ServiceLocatorHolder;\\nimport org.onap.holmes.common.utils.DroolsLog;\\n \\n\\nrule \\\"Relation_analysis_Rule\\\"\\nsalience 200\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 0,\\n \$sourceId: sourceId, sourceId != null && !sourceId.equals(\\\"\\\"),\\n\\t\\t\\t\$sourceName: sourceName, sourceName \!= null \&\& \!sourceName.equals(\\\"\\\"),\\n\\t\\t\\t\$startEpochMicrosec: startEpochMicrosec,\\n eventName in (\\\"Fault_MultiCloud_VMFailure\\\"),\\n \$eventId: eventId)\\n \$child : VesAlarm( eventId \!= $eventId, parentId == null,\\n CorrelationUtil.getInstance().isTopologicallyRelated(sourceId, \$sourceId, \$sourceName),\\n eventName in 
(\\\"Fault_MME_eNodeB out of service alarm\\\"),\\n startEpochMicrosec \< \$startEpochMicrosec + 60000 \&\& startEpochMicrosec \> \$startEpochMicrosec - 60000 )\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"Relation_analysis_Rule: rootId=\\\" + \$root.getEventId() + \\\", childId=\\\" + \$child.getEventId());\\n\\t\\t\$child.setParentId(\$root.getEventId());\\n\\t\\tupdate(\$child);\\n\\t\\t\\nend\\n\\nrule \\\"root_has_child_handle_Rule\\\"\\nsalience 150\\nno-loop true\\n\\twhen\\n\\t\\t\$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0, \$eventId: eventId)\\n\\t\\t\$child : VesAlarm(eventId \!= $eventId, parentId == $eventId)\\n\\tthen\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_has_child_handle_Rule: rootId=\\\" + \$root.getEventId() + \\\", childId=\\\" + $child.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, \$child, \\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\t\$root.setRootFlag(1);\\n\\t\\tupdate(\$root);\\nend\\n\\nrule \\\"root_no_child_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0,\\n sourceId \!= null \&\& \!sourceId.equals(\\\"\\\"),\\n\\t\\t\\tsourceName \!= null \&\& \!sourceName.equals(\\\"\\\"),\\n eventName in (\\\"Fault_MultiCloud_VMFailure\\\"))\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_no_child_handle_Rule: rootId=\\\" + \$root.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, null, \\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\t$root.setRootFlag(1);\\n\\t\\tupdate(\$root);\\nend\\n\\nrule \\\"root_cleared_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 1, rootFlag == 1)\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_cleared_handle_Rule: rootId=\\\" + \$root.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, null, \\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\tretract(\$root);\\nend\\n\\nrule \\\"child_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$child : VesAlarm(alarmIsCleared == 1, rootFlag == 0)\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"child_handle_Rule: childId=\\\" + \$child.getEventId());\\n\\t\\tretract(\$child);\\nend\", - \"services_calls\": {} -}" - - - -REGKV=' -{ - "streams_subscribes": {}, - "msb.hostname": "{{ msb_ip_addr }}", - "msb.uri": "/api/microservices/v1/services", - "streams_publishes": {}, - "holmes.default.rule.volte.scenario1": "ControlLoop-VOLTE-2179b738-fd36-4843-a71a-a8c24c70c55b$$$package 
org.onap.holmes.droolsRule;\n\nimport org.onap.holmes.common.dmaap.DmaapService;\nimport org.onap.holmes.common.api.stat.VesAlarm;\nimport org.onap.holmes.common.aai.CorrelationUtil;\nimport org.onap.holmes.common.dmaap.entity.PolicyMsg;\nimport org.onap.holmes.common.dropwizard.ioc.utils.ServiceLocatorHolder;\nimport org.onap.holmes.common.utils.DroolsLog;\n \n\nrule \"Relation_analysis_Rule\"\nsalience 200\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 0,\n $sourceId: sourceId, sourceId != null && !sourceId.equals(\"\"),\n\t\t\t$sourceName: sourceName, sourceName != null && !sourceName.equals(\"\"),\n\t\t\t$startEpochMicrosec: startEpochMicrosec,\n eventName in (\"Fault_MultiCloud_VMFailure\"),\n $eventId: eventId)\n $child : VesAlarm( eventId != $eventId, parentId == null,\n CorrelationUtil.getInstance().isTopologicallyRelated(sourceId, $sourceId, $sourceName),\n eventName in (\"Fault_MME_eNodeB out of service alarm\"),\n startEpochMicrosec < $startEpochMicrosec + 60000 && startEpochMicrosec > $startEpochMicrosec - 60000 )\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"Relation_analysis_Rule: rootId=\" + $root.getEventId() + \", childId=\" + $child.getEventId());\n\t\t$child.setParentId($root.getEventId());\n\t\tupdate($child);\n\t\t\nend\n\nrule \"root_has_child_handle_Rule\"\nsalience 150\nno-loop true\n\twhen\n\t\t$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0, $eventId: eventId)\n\t\t$child : VesAlarm(eventId != $eventId, parentId == $eventId)\n\tthen\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_has_child_handle_Rule: rootId=\" + $root.getEventId() + \", childId=\" + $child.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = dmaapService.getPolicyMsg($root, $child, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\t$root.setRootFlag(1);\n\t\tupdate($root);\nend\n\nrule \"root_no_child_handle_Rule\"\nsalience 100\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 0, rootFlag == 0,\n sourceId != null && !sourceId.equals(\"\"),\n\t\t\tsourceName != null && !sourceName.equals(\"\"),\n eventName in (\"Fault_MultiCloud_VMFailure\"))\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_no_child_handle_Rule: rootId=\" + $root.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = dmaapService.getPolicyMsg($root, null, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\t$root.setRootFlag(1);\n\t\tupdate($root);\nend\n\nrule \"root_cleared_handle_Rule\"\nsalience 100\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 1, rootFlag == 1)\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_cleared_handle_Rule: rootId=\" + $root.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = dmaapService.getPolicyMsg($root, null, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\tretract($root);\nend\n\nrule 
\"child_handle_Rule\"\nsalience 100\nno-loop true\n when\n $child : VesAlarm(alarmIsCleared == 1, rootFlag == 0)\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"child_handle_Rule: childId=\" + $child.getEventId());\n\t\tretract($child);\nend", - "services_calls": {} -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcae-analytics-holmes-rule-management" - - - -# Holmes engine management -REGKV=' -{ - "msb.hostname": "10.0.14.1", - "services_calls": {}, - "msb.uri": "/api/microservices/v1/services", - "streams_publishes": { - "dcae_cl_out": { - "type": "message_router", - "dmaap_info": { - "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.DCAE_CL_OUTPUT" - } - } - }, - "streams_subscribes": { - "ves_fault": { - "type": "message_router", - "dmaap_info": { - "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.SEC_FAULT_OUTPUT" - } - } - } -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcae-analytics-holmes-engine-management" - - -#curl http://localhost:8500/v1/kv/config_binding_service |jq .[0].Value |sed -e 's/\"//g' |base64 --decode - - - -# TCA -REGKV=' -{ - "thresholdCalculatorFlowletInstances": "2", - "tcaVESMessageStatusTableTTLSeconds": "86400", - "tcaVESMessageStatusTableName": "TCAVESMessageStatusTable", - "tcaVESAlertsTableTTLSeconds": "1728000", - "tcaVESAlertsTableName": "TCAVESAlertsTable", - "tcaSubscriberOutputStreamName": "TCASubscriberOutputStream", - "tcaAlertsAbatementTableTTLSeconds": "1728000", - "tcaAlertsAbatementTableName": "TCAAlertsAbatementTable", - "streams_subscribes": {}, - "streams_publishes": {}, - "services_calls": {}, - "appName": "dcae-tca", - "appDescription": "DCAE Analytics Threshold Crossing Alert Application" -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcaegen2-analytics-tca" - - -# TCA pref -REGKV='{ - "tca_policy": 
"{\"domain\":\"measurementsForVfScaling\",\"metricsPerEventName\":[{\"eventName\":\"vFirewallBroadcastPackets\",\"controlLoopSchemaType\":\"VNF\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":300,\"direction\":\"LESS_OR_EQUAL\",\"severity\":\"MAJOR\",\"closedLoopEventStatus\":\"ONSET\"},{\"closedLoopControlName\":\"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":700,\"direction\":\"GREATER_OR_EQUAL\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]},{\"eventName\":\"vLoadBalancer\",\"controlLoopSchemaType\":\"VM\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":300,\"direction\":\"GREATER_OR_EQUAL\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]},{\"eventName\":\"Measurement_vGMUX\",\"controlLoopSchemaType\":\"VNF\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\",\"thresholdValue\":0,\"direction\":\"EQUAL\",\"severity\":\"MAJOR\",\"closedLoopEventStatus\":\"ABATED\"},{\"closedLoopControlName\":\"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\",\"thresholdValue\":0,\"direction\":\"GREATER\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]}]}", - "subscriberTopicName": "unauthenticated.VES_MEASUREMENT_OUTPUT", - "subscriberTimeoutMS": "-1", - "subscriberProtocol": "http", - "subscriberPollingInterval": "30000", - "subscriberMessageLimit": "-1", - "subscriberHostPort": "3904", - "subscriberHostName":"{{ mr_ip_addr }}", - "subscriberContentType": "application/json", - "subscriberConsumerId": "c12", - "subscriberConsumerGroup": "OpenDCAE-c12", - "publisherTopicName": "unauthenticated.DCAE_CL_OUTPUT", - "publisherProtocol": "http", - "publisherPollingInterval": "20000", - "publisherMaxRecoveryQueueSize": "100000", - "publisherMaxBatchSize": "1", - "publisherHostPort": "3904", - "publisherHostName": "{{ mr_ip_addr }}", - "publisherContentType": "application/json", - "enableAlertCEFFormat": "false", - "enableAAIEnrichment": true, - "aaiVNFEnrichmentAPIPath": "/aai/v11/network/generic-vnfs/generic-vnf", - "aaiVMEnrichmentAPIPath": "/aai/v11/search/nodes-query", - "aaiEnrichmentUserPassword": "DCAE", - "aaiEnrichmentUserName": "DCAE", - "aaiEnrichmentProtocol": "https", - "aaiEnrichmentPortNumber": "8443", - "aaiEnrichmentIgnoreSSLCertificateErrors": "true", - "aaiEnrichmentHost":"{{ aai1_ip_addr }}", - "enableRedisCaching":false -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ 
-"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcaegen2-analytics-tca:preferences" - - - -# SNMP Trap Collector -SERVICENAME="${SRVCNAME_STATIC_SNMPTRAP}" -REGKV='{ - "files": { - "roll_frequency": "day", - "data_dir": "data", - "arriving_traps_log": "snmptrapd_arriving_traps.log", - "minimum_severity_to_log": 2, - "traps_stats_log": "snmptrapd_stats.csv", - "perm_status_file": "snmptrapd_status.log", - "pid_dir": "tmp", - "eelf_audit": "audit.log", - "log_dir": "logs", - "eelf_metrics": "metrics.log", - "eelf_base_dir": "/opt/app/snmptrap/logs", - "runtime_base_dir": "/opt/app/snmptrap", - "eelf_error": "error.log", - "eelf_debug": "debug.log", - "snmptrapd_diag": "snmptrapd_prog_diag.log" - }, - "publisher": { - "http_milliseconds_between_retries": 750, - "max_milliseconds_between_publishes": 10000, - "max_traps_between_publishes": 10, - "http_retries": 3, - "http_primary_publisher": "true", - "http_milliseconds_timeout": 1500, - "http_peer_publisher": "unavailable" - }, - "snmptrapd": { - "version": "1.4.0", - "title": "Collector for receiving SNMP traps and publishing to DMAAP/MR" - }, - "cache": { - "dns_cache_ttl_seconds": 60 - }, - "sw_interval_in_seconds": 60, - "streams_publishes": { - "sec_fault_unsecure": { - "type": "message_router", - "dmaap_info": { - "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.ONAP-COLLECTOR-SNMPTRAP" - } - } - }, - "StormWatchPolicy": "", - "services_calls": {}, - "protocols": { - "ipv4_interface": "0.0.0.0", - "ipv4_port": 6162, - "ipv6_interface": "::1", - "ipv6_port": 6162 - } -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}" - - - -# hv-ves collector -SERVICENAME="${SRVCNAME_STATIC_HVVES}" -REGKV='{ - "dmaap.kafkaBootstrapServers": "{{ mr_ip_addr }}:9092", - "collector.routing": { - "fromDomain": "HVMEAS", - "toTopic": "HV_VES_MEASUREMENTS" - } -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}" - - -# data file collector -SERVICENAME="${SRVCNAME_STATIC_DFC}" - REGKV='{ - "dmaap.dmaapConsumerConfiguration.dmaapHostName": "{{ mr_ip_addr }}", - "dmaap.dmaapConsumerConfiguration.dmaapPortNumber": 2222, - "dmaap.dmaapConsumerConfiguration.dmaapTopicName": "/events/unauthenticated.VES_NOTIFICATION_OUTPUT", - "dmaap.dmaapConsumerConfiguration.dmaapProtocol": "http", - "dmaap.dmaapConsumerConfiguration.dmaapUserName": "", - "dmaap.dmaapConsumerConfiguration.dmaapUserPassword": "", - "dmaap.dmaapConsumerConfiguration.dmaapContentType": "application/json", - "dmaap.dmaapConsumerConfiguration.consumerId": "C12", - "dmaap.dmaapConsumerConfiguration.consumerGroup": "OpenDcae-c12", - "dmaap.dmaapConsumerConfiguration.timeoutMs": -1, - "dmaap.dmaapConsumerConfiguration.messageLimit": 1, - "dmaap.dmaapProducerConfiguration.dmaapHostName": "{{ mr_ip_addr }}", - "dmaap.dmaapProducerConfiguration.dmaapPortNumber": 3907, - "dmaap.dmaapProducerConfiguration.dmaapTopicName": "publish", - "dmaap.dmaapProducerConfiguration.dmaapProtocol": "https", - "dmaap.dmaapProducerConfiguration.dmaapUserName": "dradmin", - "dmaap.dmaapProducerConfiguration.dmaapUserPassword": "dradmin", - "dmaap.dmaapProducerConfiguration.dmaapContentType": "application/octet-stream", - "ftp.ftpesConfiguration.keyCert": "config/ftpKey.jks", - "ftp.ftpesConfiguration.keyPassword": "secret", - "ftp.ftpesConfiguration.trustedCA": "config/cacerts", - "ftp.ftpesConfiguration.trustedCAPassword": "secret" - }' -curl -v -X PUT -H 
"Content-Type: application/json" \ ---data "${REGKV}" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}" - - -# PNF Registration Handler -SERVICENAME="${SRVCNAME_STATIC_PRH}" -REGKV='{ - "dmaap.dmaapProducerConfiguration.dmaapTopicName": "/events/unauthenticated.PNF_READY", - "dmaap.dmaapConsumerConfiguration.dmaapHostName": "{{ mr_ip_addr }}", - "aai.aaiClientConfiguration.aaiPnfPath": "/network/pnfs/pnf", - "aai.aaiClientConfiguration.aaiUserPassword": "AAI", - "dmaap.dmaapConsumerConfiguration.dmaapUserName": "admin", - "aai.aaiClientConfiguration.aaiBasePath": "/aai/v12", - "dmaap.dmaapConsumerConfiguration.timeoutMs": -1, - "dmaap.dmaapProducerConfiguration.dmaapPortNumber": 3904, - "aai.aaiClientConfiguration.aaiHost": "{{ aai1_ip_addr }}", - "dmaap.dmaapConsumerConfiguration.dmaapUserPassword": "admin", - "dmaap.dmaapProducerConfiguration.dmaapProtocol": "http", - "aai.aaiClientConfiguration.aaiIgnoreSslCertificateErrors": true, - "dmaap.dmaapProducerConfiguration.dmaapContentType": "application/json", - "dmaap.dmaapConsumerConfiguration.dmaapTopicName": "/events/unauthenticated.VES_PNFREG_OUTPUT", - "dmaap.dmaapConsumerConfiguration.dmaapPortNumber": 3904, - "dmaap.dmaapConsumerConfiguration.dmaapContentType": "application/json", - "dmaap.dmaapConsumerConfiguration.messageLimit": -1, - "dmaap.dmaapConsumerConfiguration.dmaapProtocol": "http", - "aai.aaiClientConfiguration.aaiUserName": "AAI", - "dmaap.dmaapConsumerConfiguration.consumerId": "c12", - "dmaap.dmaapProducerConfiguration.dmaapHostName": "{{ mr_ip_addr }}", - "aai.aaiClientConfiguration.aaiHostPortNumber": 8443, - "dmaap.dmaapConsumerConfiguration.consumerGroup": "OpenDCAE-c12", - "aai.aaiClientConfiguration.aaiProtocol": "https", - "dmaap.dmaapProducerConfiguration.dmaapUserName": "admin", - "dmaap.dmaapProducerConfiguration.dmaapUserPassword": "admin" -}' -curl -v -X PUT -H "Content-Type: application/json" \ ---data "${REGKV}" \ -"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}" diff --git a/heat/setup.sh b/heat/setup.sh deleted file mode 100755 index b95e56e..0000000 --- a/heat/setup.sh +++ /dev/null @@ -1,142 +0,0 @@ -#!/bin/bash -############################################################################# -# -# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -############################################################################# - - -NETWORK="config_default" - -echo "Cleaning up any previously deployed cloudify manager and registrator" -docker stop registrator cloudify-manager -docker rm registrator cloudify-manager - -echo "Launching registrator on dockerhost" -docker run -d \ ---network=${NETWORK} \ ---name=registrator \ --e EXTERNAL_IP={{ dcae_ip_addr }} \ --e CONSUL_HOST=consul \ --v /var/run/docker.sock:/tmp/docker.sock \ -onapdcae/registrator:v7 - - - - -rm -rf scripts-in-container -mkdir scripts-in-container -cat > scripts-in-container/install-plugins.sh << EOL -#!/bin/bash -source /cfy42/bin/activate -pip install pip==9.0.3 -cfy profiles use 127.0.0.1 -u admin -p admin -t default_tenant -cfy status -cd /tmp/bin -./build-plugins.sh https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R4 https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases -for wagon in ./wagons/*.wgn; do cfy plugins upload \$wagon ; done -deactivate -EOL - -#wget -O scripts-in-container/build-plugins.sh https://git.onap.org/dcaegen2/deployments/plain/k8s-bootstrap-container/build-plugins.sh -cat > scripts-in-container/build-plugins.sh << EOL -#!/bin/bash - -# Pull plugin archives from repos -# Build wagons -# $1 is the DCAE repo URL -# $2 is the CCSDK repo URL -# (This script runs at Docker image build time) -# -set -x -DEST=wagons - -# For DCAE, we get zips of the archives and build wagons -DCAEPLUGINFILES=\ -"\ -relationshipplugin/1.0.0/relationshipplugin-1.0.0.tgz -dcaepolicyplugin/2.3.0/dcaepolicyplugin-2.3.0.tgz -dockerplugin/3.2.1/dockerplugin-3.2.1.tgz \ -" - -# For CCSDK, we pull down the wagon files directly -CCSDKPLUGINFILES=\ -"\ -plugins/pgaas-1.1.0-py27-none-any.wgn -plugins/sshkeyshare-1.0.0-py27-none-any.wgn -" - -# Build a set of wagon files from archives in a repo -# $1 -- repo base URL -# $2 -- list of paths to archive files in the repo -function build { - for plugin in $2 - do - # Could just do wagon create with the archive URL as source, - # but can't use a requirements file with that approach - mkdir work - target=$(basename ${plugin}) - curl -Ss $1/${plugin} > ${target} - tar zxvf ${target} --strip-components=2 -C work - wagon create -t tar.gz -o ${DEST} -r work/requirements.txt --validate ./work - rm -rf work - done -} - -# Copy a set of wagons from a repo -# $1 -- repo baseURL -# $2 -- list of paths to wagons in the repo -function get_wagons { - for wagon in $2 - do - target=$(basename ${wagon}) - curl -Ss $1/${wagon} > ${DEST}/${target} - done -} - -mkdir ${DEST} -build $1 "${DCAEPLUGINFILES}" -get_wagons $2 "${CCSDKPLUGINFILES}" -EOL - -chmod 777 scripts-in-container/* - -echo "Launching Cloudify Manager container" -docker run -d \ ---network="${NETWORK}" \ ---name cloudify-manager \ ---restart unless-stopped \ --v /sys/fs/cgroup:/sys/fs/cgroup:ro \ --v /opt/app/config/scripts-in-container:/tmp/bin \ --p 80:80 \ ---tmpfs /run \ ---tmpfs /run/lock \ ---security-opt seccomp:unconfined \ ---cap-add SYS_ADMIN \ ---label "SERVICE_80_NAME=cloudify_manager" \ ---label "SERVICE_80_CHECK_TCP=true" \ ---label "SERVICE_80_CHECK_INTERVAL=15s" \ ---label "SERVICE_80_CHECK_INITIAL_STATUS=passing" \ -{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.cm-container:{{ dcae_docker_cm }} - -echo "Cloudify Manager deployed, waiting for completion" -while !
nc -z localhost 80; do sleep 1; done - -echo "Upload plugins to Cloudify Manager" - -# run as detached because this script is intended to be run in background -docker exec -itd cloudify-manager /tmp/bin/install-plugins.sh - -echo "Cloudify Manager setup complete" - diff --git a/heat/teardown.sh b/heat/teardown.sh deleted file mode 100755 index 19d74a7..0000000 --- a/heat/teardown.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -############################################################################# -# -# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -############################################################################# - - -cd /opt/app/config - -echo "Stop and remove cloudify-manager registrator dcae-health" -docker stop cloudify-manager registrator dcae-health -docker rm cloudify-manager registrator dcae-health - -echo "Stand down R2PLUS service components" -/opt/docker/docker-compose -f ./docker-compose-4.yaml down -echo "Stand down R2 platform components" -/opt/docker/docker-compose -f ./docker-compose-3.yaml down -echo "Stand down R2 minimum service components" -/opt/docker/docker-compose -f ./docker-compose-2.yaml down -echo "Stand down R2 shared platform components" -/opt/docker/docker-compose -f ./docker-compose-1.yaml down -echo "Teardown done" diff --git a/k8s-bootstrap-container/bootstrap.sh b/k8s-bootstrap-container/bootstrap.sh index 0c55559..503cec2 100755 --- a/k8s-bootstrap-container/bootstrap.sh +++ b/k8s-bootstrap-container/bootstrap.sh @@ -193,6 +193,7 @@ set +e # Deploy platform components # Allow for some parallelism to speed up the process. Probably could be somewhat more aggressive. deploy pgaas_initdb k8s-pgaas-initdb.yaml k8s-pgaas-initdb-inputs.yaml & +deploy dashboard k8s-dashboard.yaml k8s-dashboard-inputs.yaml & PG_PID=$! wait ${PG_PID} diff --git a/k8s-bootstrap-container/build-plugins.sh b/k8s-bootstrap-container/build-plugins.sh index f7bc393..388f234 100755 --- a/k8s-bootstrap-container/build-plugins.sh +++ b/k8s-bootstrap-container/build-plugins.sh @@ -42,6 +42,7 @@ dcaepolicyplugin/2.3.0/dcaepolicyplugin-2.3.0.tgz \ CCSDKPLUGINFILES=\ "\ plugins/pgaas-1.1.0-py27-none-any.wgn +plugins/dmaap-1.3.2-py27-none-any.wgn plugins/sshkeyshare-1.0.0-py27-none-any.wgn plugins/helm-4.0.0-py27-none-linux_x86_64.wgn " diff --git a/k8s-bootstrap-container/load-blueprints.sh b/k8s-bootstrap-container/load-blueprints.sh index e42a72e..e6838e6 100755 --- a/k8s-bootstrap-container/load-blueprints.sh +++ b/k8s-bootstrap-container/load-blueprints.sh @@ -30,7 +30,9 @@ k8s-ves.yaml \ k8s-snmptrap.yaml \ k8s-prh.yaml \ k8s-hv-ves.yaml \ -k8s-datafile-collector.yaml +k8s-helm-override.yaml \ +k8s-helm.yaml \ +k8s-dashboard.yaml " BPDEST=blueprints diff --git a/k8s-bootstrap-container/pom.xml b/k8s-bootstrap-container/pom.xml index dc7393a..98ce298 100644 --- a/k8s-bootstrap-container/pom.xml +++ b/k8s-bootstrap-container/pom.xml @@ -27,7 +27,7 @@ limitations under the License. 
org.onap.dcaegen2.deployments k8s-bootstrap-container dcaegen2-deployments-k8s-bootstrap-container - 1.4.11 + 1.4.12 http://maven.apache.org UTF-8 diff --git a/pnda-bootstrap-container/Dockerfile b/pnda-bootstrap-container/Dockerfile deleted file mode 100644 index 4fefa8a..0000000 --- a/pnda-bootstrap-container/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# ============LICENSE_START======================================================= -# org.onap.dcae -# ================================================================================ -# Copyright (c) 2018 Cisco Systems. All rights reserved. -# ================================================================================ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============LICENSE_END========================================================= - -FROM python:2.7.15 as build - -ARG PNDACLITAG=release/5.0 -ARG PLATFORMSALTTAG=release/5.0 - -RUN git clone -b $PNDACLITAG https://github.com/pndaproject/pnda-cli.git -RUN git clone -b $PLATFORMSALTTAG https://github.com/pndaproject/platform-salt.git - -RUN pip2 install --no-cache-dir -r pnda-cli/cli/requirements.txt --install-option="--prefix=/install" - -FROM python:2.7.15-alpine3.8 -COPY --from=build /install /usr/local -COPY --from=build /pnda-cli /pnda-cli -COPY --from=build /platform-salt /platform-salt - -RUN apk add --no-cache curl jq openssl openssh diff --git a/pnda-bootstrap-container/README.md b/pnda-bootstrap-container/README.md deleted file mode 100644 index 43e089e..0000000 --- a/pnda-bootstrap-container/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# PNDA Boostrap container -## Purpose -The artifacts in this directory build a Docker image including the PNDA CLI. -The CLI allows the bootstrap of a PNDA container. - -## Running the Container -The container is intended to be launched via a Helm chart as part -of the ONAP deployment process, guided by OOM. diff --git a/pnda-bootstrap-container/pom.xml b/pnda-bootstrap-container/pom.xml deleted file mode 100644 index 3c8dae5..0000000 --- a/pnda-bootstrap-container/pom.xml +++ /dev/null @@ -1,144 +0,0 @@ - - - - 4.0.0 - - org.onap.dcaegen2.deployments - deployments - 1.2.0-SNAPSHOT - - org.onap.dcaegen2.deployments - pnda-bootstrap-container - dcaegen2-deployments-pnda-bootstrap-container - 5.0.0 - http://maven.apache.org - - UTF-8 - true - . 
- py - Python - **/*.py - - - ${project.artifactId}-${project.version} - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - clean phase script - clean - - exec - - - - ${project.artifactId} - clean - - - - - generate-sources script - generate-sources - - exec - - - - ${project.artifactId} - generate-sources - - - - - compile script - compile - - exec - - - - ${project.artifactId} - compile - - - - - package script - package - - exec - - - - ${project.artifactId} - package - - - - - test script - test - - exec - - - - ${project.artifactId} - test - - - - - install script - install - - exec - - - - ${project.artifactId} - install - - - - - deploy script - deploy - - exec - - - - ${project.artifactId} - deploy - - - - - - - - diff --git a/pnda-mirror-container/Dockerfile b/pnda-mirror-container/Dockerfile deleted file mode 100644 index ea7e100..0000000 --- a/pnda-mirror-container/Dockerfile +++ /dev/null @@ -1,47 +0,0 @@ -# ============LICENSE_START======================================================= -# org.onap.dcae -# ================================================================================ -# Copyright (c) 2018 Cisco Systems. All rights reserved. -# ================================================================================ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============LICENSE_END========================================================= - -FROM centos:7.5.1804 as build - -ARG PNDARELEASE=release/5.0 -# Can be HDP or CDH -ARG HADOOPDIST=HDP - -RUN yum clean all && rm -rf /var/cache/yum/* && yum install gettext git -y - -RUN git clone -b $PNDARELEASE https://github.com/pndaproject/pnda.git - -COPY pnda-5.0-maint.patch / -WORKDIR /pnda -RUN git apply /pnda-5.0-maint.patch - -WORKDIR /pnda/mirror -# Add the -r flag to mirror rpm packages -RUN ./create_mirror.sh -d $HADOOPDIST -r - -WORKDIR /pnda/build -RUN ./install-build-tools.sh - -RUN yum install bzip2 make which -y -RUN source ./set-pnda-env.sh \ - && PARALLEL="--jobs 1" ./build-pnda.sh RELEASE $PNDARELEASE $HADOOPDIST - -FROM nginx:alpine - -COPY --from=build /pnda/mirror/mirror-dist /usr/share/nginx/html/ -COPY --from=build /pnda/build/pnda-dist /usr/share/nginx/html/ diff --git a/pnda-mirror-container/README.md b/pnda-mirror-container/README.md deleted file mode 100644 index 6312439..0000000 --- a/pnda-mirror-container/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# PNDA Mirror -## Purpose -The artifacts in this directory build a Docker image based public PNDA mirror -creation scripts. The container have all the needed offline resources to -deploy a PNDA platform. - -## Running the Container -The container is intended to be launched via a Helm chart as part -of the ONAP deployment process, guided by OOM. 
diff --git a/pnda-mirror-container/pnda-5.0-maint.patch b/pnda-mirror-container/pnda-5.0-maint.patch
deleted file mode 100644
index 8dc4af1..0000000
--- a/pnda-mirror-container/pnda-5.0-maint.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/mirror/dependencies/pnda-static-file-dependencies.txt b/mirror/dependencies/pnda-static-file-dependencies.txt
-index 8f04a87..133e08b 100644
---- a/mirror/dependencies/pnda-static-file-dependencies.txt
-+++ b/mirror/dependencies/pnda-static-file-dependencies.txt
-@@ -21,5 +21,5 @@ http://central.maven.org/maven2/org/kitesdk/kite-tools/1.0.0/kite-tools-1.0.0-bi
- http://central.maven.org/maven2/org/kitesdk/kite-tools/1.0.0/kite-tools-1.0.0-binary.jar.sha1
- https://releases.hashicorp.com/consul/1.0.3/consul_1.0.3_linux_amd64.zip
- https://releases.hashicorp.com/consul/1.0.3/consul_1.0.3_SHA256SUMS
---http://www.apache.org/dist/knox/1.1.0/knox-1.1.0.zip
---http://www.apache.org/dist/knox/1.1.0/knox-1.1.0.zip.sha1
--+http://archive.apache.org/dist/knox/1.1.0/knox-1.1.0.zip
--+http://archive.apache.org/dist/knox/1.1.0/knox-1.1.0.zip.sha1
-
diff --git a/pnda-mirror-container/pom.xml b/pnda-mirror-container/pom.xml
deleted file mode 100644
index 7ccabb5..0000000
--- a/pnda-mirror-container/pom.xml
+++ /dev/null
@@ -1,144 +0,0 @@
-<?xml version="1.0"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.onap.dcaegen2.deployments</groupId>
-    <artifactId>deployments</artifactId>
-    <version>1.2.0-SNAPSHOT</version>
-  </parent>
-  <groupId>org.onap.dcaegen2.deployments</groupId>
-  <artifactId>pnda-mirror-container</artifactId>
-  <name>dcaegen2-deployments-pnda-mirror-container</name>
-  <version>5.0.0</version>
-  <url>http://maven.apache.org</url>
-  <properties>
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    <sonar.skip>true</sonar.skip>
-    <sonar.sources>.</sonar.sources>
-    <sonar.language>py</sonar.language>
-    <sonar.pluginName>Python</sonar.pluginName>
-    <sonar.inclusions>**/*.py</sonar.inclusions>
-  </properties>
-  <build>
-    <finalName>${project.artifactId}-${project.version}</finalName>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <version>1.2.1</version>
-        <executions>
-          <execution>
-            <id>clean phase script</id>
-            <phase>clean</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <arguments>
-                <argument>${project.artifactId}</argument>
-                <argument>clean</argument>
-              </arguments>
-            </configuration>
-          </execution>
-          <execution>
-            <id>generate-sources script</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <arguments>
-                <argument>${project.artifactId}</argument>
-                <argument>generate-sources</argument>
-              </arguments>
-            </configuration>
-          </execution>
-          <execution>
-            <id>compile script</id>
-            <phase>compile</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <arguments>
-                <argument>${project.artifactId}</argument>
-                <argument>compile</argument>
-              </arguments>
-            </configuration>
-          </execution>
-          <execution>
-            <id>package script</id>
-            <phase>package</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <arguments>
-                <argument>${project.artifactId}</argument>
-                <argument>package</argument>
-              </arguments>
-            </configuration>
-          </execution>
-          <execution>
-            <id>test script</id>
-            <phase>test</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <arguments>
-                <argument>${project.artifactId}</argument>
-                <argument>test</argument>
-              </arguments>
-            </configuration>
-          </execution>
-          <execution>
-            <id>install script</id>
-            <phase>install</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <arguments>
-                <argument>${project.artifactId}</argument>
-                <argument>install</argument>
-              </arguments>
-            </configuration>
-          </execution>
-          <execution>
-            <id>deploy script</id>
-            <phase>deploy</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <arguments>
-                <argument>${project.artifactId}</argument>
-                <argument>deploy</argument>
-              </arguments>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/pom.xml b/pom.xml
index ee0e322..e0489ba 100644
--- a/pom.xml
+++ b/pom.xml
@@ -36,7 +36,6 @@ limitations under the License.
   <packaging>pom</packaging>
   <modules>
-    <module>heat</module>
     <module>redis-cluster-container</module>
     <module>cm-container</module>
     <module>k8s-bootstrap-container</module>
@@ -46,7 +45,7 @@ limitations under the License.
     <module>consul-loader-container</module>
     <module>multisite-init-container</module>
   </modules>
-
+
  <properties>
   <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
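The new settings.xml wires the build to the ONAP Nexus staging, public, release, and snapshot repositories (plus the OpenDaylight mirrors). A typical invocation — the module selection is purely illustrative, and note the hard-coded localRepository path, which assumes the Vagrant-based build user:

```sh
# Build the whole tree against the repositories declared in settings.xml;
# adjust <localRepository> first if you are not the assumed vagrant user.
mvn -s settings.xml clean install

# Or scope the build to a single module from the root pom:
mvn -s settings.xml -pl k8s-bootstrap-container clean install
```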
diff --git a/settings.xml b/settings.xml
new file mode 100644
index 0000000..3330608
--- /dev/null
+++ b/settings.xml
@@ -0,0 +1,206 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
+  <localRepository>/home/vagrant/.m2/repository</localRepository>
+  <profiles>
+    <profile>
+      <id>openecomp-staging</id>
+      <repositories>
+        <repository>
+          <id>openecomp-staging</id>
+          <name>openecomp-staging</name>
+          <url>https://nexus.onap.org/content/repositories/staging/</url>
+          <releases>
+            <enabled>true</enabled>
+            <updatePolicy>never</updatePolicy>
+          </releases>
+          <snapshots>
+            <enabled>false</enabled>
+          </snapshots>
+        </repository>
+      </repositories>
+      <pluginRepositories>
+        <pluginRepository>
+          <id>openecomp-staging</id>
+          <name>openecomp-staging</name>
+          <url>https://nexus.onap.org/content/repositories/staging/</url>
+          <releases>
+            <enabled>true</enabled>
+            <updatePolicy>never</updatePolicy>
+          </releases>
+          <snapshots>
+            <enabled>false</enabled>
+          </snapshots>
+        </pluginRepository>
+      </pluginRepositories>
+    </profile>
+    <profile>
+      <id>openecomp-public</id>
+      <repositories>
+        <repository>
+          <id>openecomp-public</id>
+          <name>openecomp-public</name>
+          <url>https://nexus.onap.org/content/repositories/public/</url>
+          <releases>
+            <enabled>true</enabled>
+            <updatePolicy>never</updatePolicy>
+          </releases>
+          <snapshots>
+            <enabled>false</enabled>
+          </snapshots>
+        </repository>
+      </repositories>
+      <pluginRepositories>
+        <pluginRepository>
+          <id>openecomp-public</id>
+          <name>openecomp-public</name>
+          <url>https://nexus.onap.org/content/repositories/public/</url>
+          <releases>
+            <enabled>true</enabled>
+            <updatePolicy>never</updatePolicy>
+          </releases>
+          <snapshots>
+            <enabled>false</enabled>
+          </snapshots>
+        </pluginRepository>
+      </pluginRepositories>
+    </profile>
+    <profile>
+      <id>openecomp-release</id>
+      <repositories>
+        <repository>
+          <id>openecomp-release</id>
+          <name>openecomp-release</name>
+          <url>https://nexus.onap.org/content/repositories/releases/</url>
+          <releases>
+            <enabled>true</enabled>
+            <updatePolicy>never</updatePolicy>
+          </releases>
+          <snapshots>
+            <enabled>false</enabled>
+          </snapshots>
+        </repository>
+      </repositories>
+      <pluginRepositories>
+        <pluginRepository>
+          <id>openecomp-release</id>
+          <name>openecomp-release</name>
+          <url>https://nexus.onap.org/content/repositories/releases/</url>
+          <releases>
+            <enabled>true</enabled>
+            <updatePolicy>never</updatePolicy>
+          </releases>
+          <snapshots>
+            <enabled>false</enabled>
+          </snapshots>
+        </pluginRepository>
+      </pluginRepositories>
+    </profile>
+    <profile>
+      <id>openecomp-snapshots</id>
+      <repositories>
+        <repository>
+          <id>openecomp-snapshot</id>
+          <name>openecomp-snapshot</name>
+          <url>https://nexus.onap.org/content/repositories/snapshots/</url>
+          <releases>
+            <enabled>false</enabled>
+          </releases>
+          <snapshots>
+            <enabled>true</enabled>
+          </snapshots>
+        </repository>
+      </repositories>
+      <pluginRepositories>
+        <pluginRepository>
+          <id>openecomp-snapshot</id>
+          <name>openecomp-snapshot</name>
+          <url>https://nexus.onap.org/content/repositories/snapshots/</url>
+          <releases>
+            <enabled>false</enabled>
+          </releases>
+          <snapshots>
+            <enabled>true</enabled>
+          </snapshots>
+        </pluginRepository>
+      </pluginRepositories>
+    </profile>
+    <profile>
+      <id>opendaylight-release</id>
+      <repositories>
+        <repository>
+          <id>opendaylight-mirror</id>
+          <name>opendaylight-mirror</name>
+          <url>https://nexus.opendaylight.org/content/repositories/public/</url>
+          <releases>
+            <enabled>true</enabled>
+            <updatePolicy>never</updatePolicy>
+          </releases>
+          <snapshots>
+            <enabled>false</enabled>
+          </snapshots>
+        </repository>
+      </repositories>
+      <pluginRepositories>
+        <pluginRepository>
+          <id>opendaylight-mirror</id>
+          <name>opendaylight-mirror</name>
+          <url>https://nexus.opendaylight.org/content/repositories/public/</url>
+          <releases>
+            <enabled>true</enabled>
+            <updatePolicy>never</updatePolicy>
+          </releases>
+          <snapshots>
+            <enabled>false</enabled>
+          </snapshots>
+        </pluginRepository>
+      </pluginRepositories>
+    </profile>
+    <profile>
+      <id>opendaylight-snapshots</id>
+      <repositories>
+        <repository>
+          <id>opendaylight-snapshot</id>
+          <name>opendaylight-snapshot</name>
+          <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+          <releases>
+            <enabled>false</enabled>
+          </releases>
+          <snapshots>
+            <enabled>true</enabled>
+          </snapshots>
+        </repository>
+      </repositories>
+      <pluginRepositories>
+        <pluginRepository>
+          <id>opendaylight-snapshot</id>
+          <name>opendaylight-snapshot</name>
+          <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+          <releases>
+            <enabled>false</enabled>
+          </releases>
+          <snapshots>
+            <enabled>true</enabled>
+          </snapshots>
+        </pluginRepository>
+      </pluginRepositories>
+    </profile>
+  </profiles>
+  <activeProfiles>
+    <activeProfile>openecomp-staging</activeProfile>
+    <activeProfile>openecomp-public</activeProfile>
+    <activeProfile>openecomp-release</activeProfile>
+    <activeProfile>openecomp-snapshots</activeProfile>
+  </activeProfiles>
+</settings>
diff --git a/tls-init-container/Dockerfile b/tls-init-container/Dockerfile
new file mode 100644
index 0000000..366276d
--- /dev/null
+++ b/tls-init-container/Dockerfile
@@ -0,0 +1,23 @@
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+FROM busybox
+RUN mkdir -p /opt/tls/source && mkdir -p /opt/tls/shared
+COPY ./tls /opt/tls/source
+RUN base64 -d /opt/tls/source/trust.jks.b64 > /opt/tls/source/trust.jks && base64 -d /opt/tls/source/cert.jks.b64 > /opt/tls/source/cert.jks && base64 -d /opt/tls/source/cert.p12.b64 > /opt/tls/source/cert.p12
+COPY setup-tls.sh /opt/tls
+ENTRYPOINT ["/opt/tls/setup-tls.sh"]
--
cgit 1.2.3-korg
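The new tls-init image expects base64-wrapped keystores in a tls/ directory of the build context and decodes them at image build time; setup-tls.sh (not part of this patch) runs at container start. A sketch of preparing the build context — the input keystore filenames and the image tag are placeholders, while the .b64 names and the tls/ directory are what the Dockerfile above expects:

```sh
# Encode the keystores into the names the Dockerfile decodes with base64 -d.
mkdir -p tls-init-container/tls
base64 trust.jks > tls-init-container/tls/trust.jks.b64
base64 cert.jks  > tls-init-container/tls/cert.jks.b64
base64 cert.p12  > tls-init-container/tls/cert.p12.b64

# Build the init image; at runtime it hands off to /opt/tls/setup-tls.sh.
docker build -t tls-init:latest tls-init-container/
```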