author     Vijay Venkatesh Kumar <vv770d@att.com>    2019-04-05 00:38:59 +0000
committer  Vijay Venkatesh Kumar <vv770d@att.com>    2019-04-08 17:41:01 +0000
commit     d2c821277a10f0c746ddb1a99c59a3ef88fb2f1c (patch)
tree       fc663e1621a896236a00208f849f30127e157eb5 /archive/heat
parent     f5564524a065321b1eb91b14dd3342acf85bfe62 (diff)
Bulk update to deployment
1) Removed pnda folder (moved to pnda repo)
2) Moved older R3 heat repo into archive folder
3) Added DMaaP plugin
4) Added new blueprints for Dashboard/helm
Change-Id: I82cb8c482a0a35fe8094da825e7403b0fc4ee33b
Signed-off-by: Vijay Venkatesh Kumar <vv770d@att.com>
Issue-ID: DCAEGEN2-1270
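
The repository is dcaegen2/deployments (the git.onap.org URL referenced inside setup.sh later in this patch confirms the repo name). A short sketch of locating this change in a clone, by Change-Id or commit hash, assuming the usual ONAP Gerrit layout:

    git clone https://gerrit.onap.org/r/dcaegen2/deployments
    cd deployments
    # find the commit by the Change-Id above, then show its diffstat
    git log --all --grep='I82cb8c482a0a35fe8094da825e7403b0fc4ee33b'
    git show --stat d2c821277a10f0c746ddb1a99c59a3ef88fb2f1c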
Diffstat (limited to 'archive/heat')
-rwxr-xr-x  archive/heat/build-plugins.sh        77
-rw-r--r--  archive/heat/docker-compose-1.yaml   82
-rw-r--r--  archive/heat/docker-compose-2.yaml   99
-rw-r--r--  archive/heat/docker-compose-3.yaml   70
-rw-r--r--  archive/heat/docker-compose-4.yaml  167
-rw-r--r--  archive/heat/pom.xml                158
-rwxr-xr-x  archive/heat/pullall.sh              40
-rwxr-xr-x  archive/heat/register.sh            605
-rwxr-xr-x  archive/heat/setup.sh               142
-rwxr-xr-x  archive/heat/teardown.sh             35
10 files changed, 1475 insertions, 0 deletions
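
For orientation before the patch body: the four compose files split the archived stack into layers that the setup/teardown scripts drive in order. A minimal bring-up sketch — the reverse of teardown.sh at the end of this patch, and assuming the {{ ... }} template variables have been rendered and the files placed under /opt/app/config, as that script implies:

    cd /opt/app/config
    # shared platform: two postgres instances, consul, config-binding-service, tls-init
    /opt/docker/docker-compose -f ./docker-compose-1.yaml up -d
    # minimum service components: VES collector, TCA, Holmes
    /opt/docker/docker-compose -f ./docker-compose-2.yaml up -d
    # platform components: inventory, service-change/deployment/policy handlers
    /opt/docker/docker-compose -f ./docker-compose-3.yaml up -d
    # R3+ service components: snmptrap, prh, hvves, datafile, mappers, heartbeat
    /opt/docker/docker-compose -f ./docker-compose-4.yaml up -d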
diff --git a/archive/heat/build-plugins.sh b/archive/heat/build-plugins.sh
new file mode 100755
index 0000000..647ef7a
--- /dev/null
+++ b/archive/heat/build-plugins.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+
+# Pull plugin archives from repos
+# Build wagons
+# $1 is the DCAE repo URL
+# $2 is the CCSDK repo URL
+# (This script runs at Docker image build time)
+#
+set -x
+DEST=wagons
+
+# For DCAE, we get zips of the archives and build wagons
+DCAEPLUGINFILES=\
+"\
+k8splugin/1.4.3/k8splugin-1.4.3.tgz
+relationshipplugin/1.0.0/relationshipplugin-1.0.0.tgz
+dcaepolicyplugin/2.3.0/dcaepolicyplugin-2.3.0.tgz
+dockerplugin/3.2.0/dockerplugin-3.2.0.tgz \
+"
+
+# For CCSDK, we pull down the wagon files directly
+CCSDKPLUGINFILES=\
+"\
+plugins/pgaas-1.1.0-py27-none-any.wgn
+plugins/sshkeyshare-1.0.0-py27-none-any.wgn
+"
+
+# Build a set of wagon files from archives in a repo
+# $1 -- repo base URL
+# $2 -- list of paths to archive files in the repo
+function build {
+    for plugin in $2
+    do
+        # Could just do wagon create with the archive URL as source,
+        # but can't use a requirements file with that approach
+        mkdir work
+        target=$(basename ${plugin})
+        curl -Ss $1/${plugin} > ${target}
+        tar zxvf ${target} --strip-components=2 -C work
+        wagon create -t tar.gz -o ${DEST} -r work/requirements.txt --validate ./work
+        rm -rf work
+    done
+}
+
+# Copy a set of wagons from a repo
+# $1 -- repo baseURL
+# $2 -- list of paths to wagons in the repo
+function get_wagons {
+    for wagon in $2
+    do
+        target=$(basename ${wagon})
+        curl -Ss $1/${wagon} > ${DEST}/${target}
+    done
+}
+
+mkdir ${DEST}
+build $1 "${DCAEPLUGINFILES}"
+get_wagons $2 "${CCSDKPLUGINFILES}"
diff --git a/archive/heat/docker-compose-1.yaml b/archive/heat/docker-compose-1.yaml
new file mode 100644
index 0000000..3041d6c
--- /dev/null
+++ b/archive/heat/docker-compose-1.yaml
@@ -0,0 +1,82 @@
+version: '2.1'
+services:
+  pgHolmes:
+    image: "postgres:9.5"
+    container_name: "pgHolmes"
+    restart: "always"
+    hostname: "phHolmes"
+    environment:
+      - "POSTGRES_USER=holmes"
+      - "POSTGRES_PASSWORD=holmespwd"
+    ports:
+      - "5432:5432"
+    labels:
+      - "SERVICE_5432_NAME=pgHolmes"
+      - "SERVICE_5432_CHECK_TCP=true"
+      - "SERVICE_5432_CHECK_INTERVAL=15s"
+      - "SERVICE_5432_CHECK_INITIAL_STATUS=passing"
+
+  pgInventory:
+    image: "postgres:9.5"
+    container_name: "pgInventory"
+    restart: "always"
+    hostname: "pgInventory"
+    environment:
+      - "POSTGRES_USER=inventory"
+      - "POSTGRES_PASSWORD=inventorypwd"
+    ports:
+      - "5433:5432"
+    labels:
+      - "SERVICE_5432_NAME=pgInventory"
+      - "SERVICE_5432_CHECK_TCP=true"
+      - "SERVICE_5432_CHECK_INTERVAL=15s"
+      - "SERVICE_5432_CHECK_INITIAL_STATUS=passing"
+
+
+  consul:
+    image: "consul:0.8.3"
+    container_name: "consul"
+    privileged: true
+    restart: "always"
+    hostname: "consul"
+    ports:
+      - "8500:8500"
+      - "53:8600/udp"
+      - "53:8600/tcp"
+    environment:
+      - "DOCKER_HOST=tcp://{{ dcae_ip_addr }}:2376"
+    command: "agent -ui -server -bootstrap-expect 1 -client 0.0.0.0 -log-level trace -recursor {{ dns_ip_addr }}"
+    labels:
+      - "SERVICE_8500_NAME=consul"
+      - "SERVICE_8500_CHECK_HTTP=/v1/agent/services"
+      - "SERVICE_8500_CHECK_INTERVAL=15s"
+      - "SERVICE_8500_CHECK_INITIAL_STATUS=passing"
+
+
+  config-binding-service:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.configbinding.app-app:{{ dcae_docker_cbs }}"
+    container_name: "config_binding_service"
+    restart: "always"
+    hostname: "config-binding-service"
+    environment:
+      - "CONSUL_HOST=consul"
+    ports:
+      - "10000:10000"
+    depends_on:
+      - "consul"
+      - "tls-init"
+    labels:
+      - "SERVICE_10000_NAME=config_binding_service"
+      - "SERVICE_10000_CHECK_HTTP=/healthcheck"
+      - "SERVICE_10000_CHECK_INTERVAL=15s"
+      - "SERVICE_10000_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+
+  tls-init:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.tls-init-container:{{ dcae_docker_tls }}"
+    container_name: "tls-init"
+    hostname: "tls-init"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
diff --git a/archive/heat/docker-compose-2.yaml b/archive/heat/docker-compose-2.yaml
new file mode 100644
index 0000000..dca210e
--- /dev/null
+++ b/archive/heat/docker-compose-2.yaml
@@ -0,0 +1,99 @@
+version: '2.1'
+services:
+
+  mvp-dcaegen2-collectors-ves:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.ves.vescollector:{{ dcae_docker_ves }}"
+    container_name: "mvp-dcaegen2-collectors-ves"
+    restart: "always"
+    hostname: "mvp-dcaegen2-collectors-ves"
+    environment:
+      - "DMAAPHOST={{ mr_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      - "SERVICE_NAME=mvp-dcaegen2-collectors-ves"
+      - "HOSTNAME=mvp-dcaegen2-collectors-ves"
+    ports:
+      - "8081:8080"
+    labels:
+      - "SERVICE_8080_NAME=mvp-dcaegen2-collectors-ves"
+      - "SERVICE_8080_CHECK_HTTP=/healthcheck"
+      - "SERVICE_8080_CHECK_INTERVAL=15s"
+      - "SERVICE_8080_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+
+  mvp-dcaegen2-analytics-tca:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.tca-cdap-container:{{ dcae_docker_tca }}"
+    container_name: "mvp-dcaegen2-analytics-tca"
+    restart: "always"
+    hostname: "mvp-dcaegen2-analytics-tca"
+    environment:
+      - "DMAAPHOST={{ mr_ip_addr }}"
+      - "DMAAPPORT=3904"
+      - "DMAAPPUBTOPIC=unauthenticated.DCAE_CL_OUTPUT"
+      - "DMAAPSUBTOPIC=unauthenticated.VES_MEASUREMENT_OUTPUT"
+      - "AAIHOST={{ aai1_ip_addr }}"
+      - "AAIPORT=8443"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CBS_HOST=config-binding-service"
+      - "CBS_PORT=10000"
+      - "SERVICE_NAME=mvp-dcaegen2-analytics-tca"
+      - "HOSTNAME=mvp-dcaegen2-analytics-tca"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      # set the parameter below to enable REDIS caching.
+      #- REDISHOSTPORT=redis-cluster:6379
+    ports:
+      - "11011:11011"
+      #- "11015:11015"
+    labels:
+      - "SERVICE_11011_NAME=mvp-dcaegen2-analytics-tca"
+      - "SERVICE_11011_CHECK_HTTP=/cdap/ns/cdap_tca_hi_lo"
+      - "SERVICE_11011_CHECK_INTERVAL=15s"
+      - "SERVICE_11011_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+  mvp-dcaegen2-analytics-holmes-engine-management:
+    image: "{{ nexus_docker_repo }}/onap/holmes/engine-management:{{ holmes_docker_em }}"
+    container_name: "mvp-dcaegen2-analytics-holmes-engine-management"
+    restart: "always"
+    hostname: "mvp-dcaegen2-analytics-holmes-engine-management"
+    environment:
+      - "URL_JDBC=pgHolmes:5432"
+      - "JDBC_USERNAME=holmes"
+      - "JDBC_PASSWORD=holmespwd"
+      - "MSB_ADDR={{ msb_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      - "HOSTNAME=mvp-dcaegen2-analytics-holmes-engine-management"
+    ports:
+      - "9102:9102"
+    labels:
+      - "SERVICE_9102_IGNORE=true"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+  mvp-dcaegen2-analytics-holmes-rule-management:
+    image: "{{ nexus_docker_repo }}/onap/holmes/rule-management:{{ holmes_docker_rm }}"
+    container_name: "mvp-dcaegen2-analytics-holmes-rule-management"
+    restart: "always"
+    hostname: "mvp-dcaegen2-analytics-holmes-rule-management"
+    environment:
+      - "URL_JDBC=pgHolmes:5432"
+      - "JDBC_USERNAME=holmes"
+      - "JDBC_PASSWORD=holmespwd"
+      - "MSB_ADDR={{ msb_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      - "HOSTNAME=mvp-dcaegen2-analytics-holmes-rule-management"
+    ports:
+      - "9101:9101"
+    labels:
+      - "SERVICE_9101_IGNORE=true"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
diff --git a/archive/heat/docker-compose-3.yaml b/archive/heat/docker-compose-3.yaml
new file mode 100644
index 0000000..27dbb38
--- /dev/null
+++ b/archive/heat/docker-compose-3.yaml
@@ -0,0 +1,70 @@
+version: '2.1'
+services:
+
+  inventory:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.inventory-api:{{ dcae_docker_inv }}"
+    restart: "always"
+    container_name: "inventory"
+    hostname: "inventory"
+    environment:
+      - "POSTGRES_USER=inventory"
+      - "POSTGRES_PASSWORD=inventorypwd"
+    ports:
+      - "8080:8080"
+    labels:
+      - "SERVICE_8080_NAME=inventory"
+      - "SERVICE_8080_CHECK_HTTP=/dcae-service-types"
+      - "SERVICE_8080_CHECK_INTERVAL=15s"
+      - "SERVICE_8080_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+
+  service-change-handler:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.servicechange-handler:{{ dcae_docker_sch }}"
+    container_name: "service-change-handler"
+    restart: "always"
+    hostname: "service-change-handler"
+    ports:
+      - "8079:8079"
+    environment:
+      - "POSTGRES_USER=inventory"
+      - "POSTGRES_PASSWORD=inventorypwd"
+    labels:
+      - "SERVICE_NAME=service_change_handler"
+      - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/health.sh"
+      - "SERVICE_CHECK_INTERVAL=15s"
+      - "SERVICE_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+
+  deployment_handler:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.deployment-handler:{{ dcae_docker_dh }}"
+    container_name: "deployment-handler"
+    restart: "always"
+    hostname: "deployment-handler"
+    environment:
+      - "CLOUDIFY_PASSWORD=admin"
+      - "CLOUDIFY_USER=admin"
+    ports:
+      - "8188:8443"
+    volumes:
+      - "./tls/shared:/opt/app/dh/etc/cert/"
+
+
+  policy_handler:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.policy-handler:{{ dcae_docker_ph }}"
+    container_name: "policy-handler"
+    restart: "always"
+    hostname: "policy-handler"
+    ports:
+      - "25577:25577"
+    labels:
+      - "SERVICE_25577_NAME=policy_handler"
+      - "SERVICE_25577_CHECK_HTTP=/healthcheck"
+      - "SERVICE_25577_CHECK_INTERVAL=15s"
+      - "SERVICE_25577_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/app/policy_handler/etc/tls/certs/"
+
diff --git a/archive/heat/docker-compose-4.yaml b/archive/heat/docker-compose-4.yaml
new file mode 100644
index 0000000..c13562d
--- /dev/null
+++ b/archive/heat/docker-compose-4.yaml
@@ -0,0 +1,167 @@
+version: '2.1'
+services:
+  snmptrap:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.snmptrap:{{ dcae_docker_snmptrap }}"
+    container_name: "static-dcaegen2-collectors-snmptrap"
+    restart: "always"
+    hostname: "static-dcaegen2-collectors-snmptrap"
+    environment:
+      - "DMAAPHOST={{ mr_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      - "SERVICE_NAME=static-dcaegen2-collectors-snmptrap"
+      - "HOSTNAME=static-dcaegen2-collectors-snmptrap"
+      - "HOSTALIASES=/etc/host.aliases"
+    ports:
+      - "162:6162/udp"
+    labels:
+      - "SERVICE_NAME=static-dcaegen2-collectors-snmptrap"
+      - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/snmptrap/bin/snmptrapd.sh status"
+      - "SERVICE_CHECK_INTERVAL=300s"
+      - "SERVICE_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+
+  prh:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.prh.prh-app-server:{{ dcae_docker_prh }}"
+    container_name: "static-dcaegen2-services-prh"
+    restart: "always"
+    hostname: "static-dcaegen2-services-prh"
+    environment:
+      - "DMAAPHOST={{ mr_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      - "SERVICE_NAME=static-dcaegen2-services-prh"
+      - "HOSTNAME=static-dcaegen2-services-prh"
+      - "HOSTALIASES=/etc/host.aliases"
+    ports:
+      - "8082:8080"
+    labels:
+      - "SERVICE_8082_NAME=static-dcaegen2-services-prh"
+      - "SERVICE_8082_CHECK_HTTP=/heartbeat"
+      - "SERVICE_8082_CHECK_INTERVAL=15s"
+      - "SERVICE_8082_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+
+  hvves:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:{{ dcae_docker_hvves }}"
+    container_name: "static-dcaegen2-collectors-hvves"
+    restart: "always"
+    hostname: "static-dcaegen2-collectors-hvves"
+    environment:
+      - "DMAAPHOST={{ mr_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      - "SERVICE_NAME=static-dcaegen2-collectors-hvves"
+      - "HOSTNAME=static-dcaegen2-collectors-hvves"
+      - "HOSTALIASES=/etc/host.aliases"
+    ports:
+      - "6061:6061"
+    labels:
+      - "SERVICE_NAME=static-dcaegen2-collectors-hvves"
+      - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/hvves/bin/healthcheck.sh"
+      - "SERVICE_CHECK_INTERVAL=15s"
+      - "SERVICE_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+
+  datafile:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.datafile.datafile-app-server:{{ dcae_docker_datafile }}"
+    container_name: "static-dcaegen2-collectors-datafile"
+    restart: "always"
+    hostname: "static-dcaegen2-collectors-datafile"
+    environment:
+      - "DMAAPHOST={{ mr_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      - "SERVICE_NAME=static-dcaegen2-collectors-datafile"
+      - "HOSTNAME=static-dcaegen2-collectors-datafile"
+      - "HOSTALIASES=/etc/host.aliases"
+    labels:
+      - "SERVICE_NAME=static-dcaegen2-collectors-datafile"
+      - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh"
+      - "SERVICE_CHECK_INTERVAL=15s"
+      - "SERVICE_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+  mapper-universalvesadaptor:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.universalvesadaptor:{{ dcae_docker_mua }}"
+    container_name: "static-dcaegen2-services-mua"
+    restart: "always"
+    hostname: "static-dcaegen2-services-mua"
+    environment:
+      - "DMAAPHOST={{ mr_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      - "SERVICE_NAME=static-dcaegen2-services-mua"
+      - "HOSTNAME=static-dcaegen2-services-mua"
+      - "HOSTALIASES=/etc/host.aliases"
+      - "MR_DEFAULT_PORT_NUMBER=3904"
+      - "URL_JDBC=jdbc:postgresql://{{dcae_ip_addr}}:5433/inventory"
+      - "JDBC_USERNAME=inventory"
+      - "JDBC_PASSWORD=inventorypwd"
+    labels:
+      - "SERVICE_NAME=static-dcaegen2-services-mua"
+      - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh"
+      - "SERVICE_CHECK_INTERVAL=15s"
+      - "SERVICE_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+  mapper-snmp:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.snmpmapper:{{ dcae_docker_msnmp }}"
+    container_name: "static-dcaegen2-services-msnmp"
+    restart: "always"
+    hostname: "static-dcaegen2-services-msnmp"
+    environment:
+      - "DMAAPHOST={{ mr_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      - "SERVICE_NAME=static-dcaegen2-services-msnmp"
+      - "HOSTNAME=static-dcaegen2-services-msnmp"
+      - "HOSTALIASES=/etc/host.aliases"
+      - "URL_JDBC=jdbc:postgresql://{{dcae_ip_addr}}:5433/inventory"
+      - "JDBC_USERNAME=inventory"
+      - "JDBC_PASSWORD=inventorypwd"
+    labels:
+      - "SERVICE_NAME=static-dcaegen2-services-msnmp"
+      - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh"
+      - "SERVICE_CHECK_INTERVAL=15s"
+      - "SERVICE_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+
+  heartbeat:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.heartbeat:{{ dcae_docker_heartbeat }}"
+    container_name: "static-dcaegen2-services-heartbeat"
+    restart: "always"
+    hostname: "static-dcaegen2-services-heartbeat"
+    environment:
+      - "DMAAPHOST={{ mr_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      - "SERVICE_NAME=static-dcaegen2-services-heartbeat"
+      - "HOSTNAME=static-dcaegen2-services-heartbeat"
+      - "HOSTALIASES=/etc/host.aliases"
+    labels:
+      - "SERVICE_NAME=static-dcaegen2-services-heartbeat"
+      - "SERVICE_CHECK_DOCKER_SCRIPT=/opt/app/datafile/bin/healthcheck.sh"
+      - "SERVICE_CHECK_INTERVAL=15s"
+      - "SERVICE_CHECK_INITIAL_STATUS=passing"
+    volumes:
+      - "./tls/shared:/opt/tls/shared"
+
+
diff --git a/archive/heat/pom.xml b/archive/heat/pom.xml
new file mode 100644
index 0000000..e21db72
--- /dev/null
+++ b/archive/heat/pom.xml
@@ -0,0 +1,158 @@
+<?xml version="1.0"?>
+<!--
+================================================================================
+Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+================================================================================
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+============LICENSE_END=========================================================
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.onap.dcaegen2.deployments</groupId>
+    <artifactId>deployments</artifactId>
+    <version>1.2.0-SNAPSHOT</version>
+  </parent>
+  <groupId>org.onap.dcaegen2.deployments</groupId>
+  <artifactId>heat</artifactId>
+  <name>dcaegen2-deployments-heat</name>
+  <version>1.0.0-SNAPSHOT</version>
+  <url>http://maven.apache.org</url>
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <sonar.skip>true</sonar.skip>
+  </properties>
+  <build>
+    <finalName>${project.artifactId}-${project.version}</finalName>
+    <plugins>
+      <!-- plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <version>2.4.1</version>
+        <configuration>
+          <descriptors>
+            <descriptor>assembly/dep.xml</descriptor>
+          </descriptors>
+        </configuration>
+        <executions>
+          <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin -->
+      <!-- now we configure custom action (calling a script) at various lifecycle phases -->
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.2.1</version>
+        <executions>
+          <execution>
+            <id>clean phase script</id>
+            <phase>clean</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>clean</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>generate-sources script</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>generate-sources</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>compile script</id>
+            <phase>compile</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>compile</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>package script</id>
+            <phase>package</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>package</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>test script</id>
+            <phase>test</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>test</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>install script</id>
+            <phase>install</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>install</argument>
+              </arguments>
+            </configuration>
+          </execution>
+          <execution>
+            <id>deploy script</id>
+            <phase>deploy</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <arguments>
+                <argument>${project.artifactId}</argument>
+                <argument>deploy</argument>
+              </arguments>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/archive/heat/pullall.sh b/archive/heat/pullall.sh
new file mode 100755
index 0000000..42ee1ad
--- /dev/null
+++ b/archive/heat/pullall.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+#############################################################################
+#
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#############################################################################
+
+docker login {{ nexus_docker_repo }} -u {{ nexus_username }} -p {{ nexus_password }}
+
+docker pull postgres:9.5
+docker pull consul:0.8.3
+docker pull nginx:latest
+docker pull onapdcae/registrator:v7
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.configbinding.app-app:{{ dcae_docker_cbs }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.ves.vescollector:{{ dcae_docker_ves }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.tca-cdap-container:{{ dcae_docker_tca }}
+docker pull {{ nexus_docker_repo }}/onap/holmes/engine-management:{{ holmes_docker_em }}
+docker pull {{ nexus_docker_repo }}/onap/holmes/rule-management:{{ holmes_docker_rm }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.inventory-api:{{ dcae_docker_inv }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.servicechange-handler:{{ dcae_docker_sch }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.deployment-handler:{{ dcae_docker_dh }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.policy-handler:{{ dcae_docker_ph }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.snmptrap:{{ dcae_docker_snmptrap }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.prh.prh-app-server:{{ dcae_docker_prh }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:{{ dcae_docker_hvves }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.datafile.datafile-app-server:{{ dcae_docker_datafile }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.universalvesadaptor:{{ dcae_docker_mua }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.mapper.vesadapter.snmpmapper:{{ dcae_docker_msnmp }}
+docker pull {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.services.heartbeat:{{ dcae_docker_heartbeat }}
diff --git a/archive/heat/register.sh b/archive/heat/register.sh
new file mode 100755
index 0000000..34c1505
--- /dev/null
+++ b/archive/heat/register.sh
@@ -0,0 +1,605 @@
+#!/bin/bash
+
+#############################################################################
+#
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#############################################################################
+
+
+
+# We now register services that are not handled by Registrator
+# minimum platform components
+HOSTNAME_CONSUL="consul"
+SRVCNAME_CONSUL="consul"
+HOSTNAME_CM="cloudify-manager"
+SRVCNAME_CM="cloudify_manager"
+HOSTNAME_CBS="config-binding-service"
+SRVCNAME_CBS="config_binding_service"
+
+# R3 MVP service components
+HOSTNAME_MVP_VES="mvp-dcaegen2-collectors-ves"
+SRVCNAME_MVP_VES="mvp-dcaegen2-collectors-ves"
+HOSTNAME_MVP_TCA="mvp-dcaegen2-analytics-tca"
+SRVCNAME_MVP_TCA="mvp-dcaegen2-analytics-tca"
+HOSTNAME_MVP_HR="mvp-dcaegen2-analytics-holmes-rule-management"
+SRVCNAME_MVP_HR="mvp-dcaegen2-analytics-holmes-rule-management"
+HOSTNAME_MVP_HE="mvp-dcaegen2-analytics-holmes-engine-management"
+SRVCNAME_MVP_HE="mvp-dcaegen2-analytics-holmes-engine-management"
+
+# R3 PLUS service components
+HOSTNAME_STATIC_SNMPTRAP="static-dcaegen2-collectors-snmptrap"
+SRVCNAME_STATIC_SNMPTRAP="static-dcaegen2-collectors-snmptrap"
+HOSTNAME_STATIC_MAPPER="static-dcaegen2-services-mapper"
+SRVCNAME_STATIC_MAPPER="static-dcaegen2-services-mapper"
+HOSTNAME_STATIC_HEARTBEAT="static-dcaegen2-services-heartbeat"
+SRVCNAME_STATIC_HEARTBEAT="static-dcaegen2-services-heartbeat"
+HOSTNAME_STATIC_PRH="static-dcaegen2-services-prh"
+SRVCNAME_STATIC_PRH="static-dcaegen2-services-prh"
+HOSTNAME_STATIC_HVVES="static-dcaegen2-collectors-hvves"
+SRVCNAME_STATIC_HVVES="static-dcaegen2-collectors-hvves"
+HOSTNAME_STATIC_DFC="static-dcaegen2-collectors-datafile"
+SRVCNAME_STATIC_DFC="static-dcaegen2-collectors-datafile"
+
+
+# registering docker host
+SVC_NAME="dockerhost"
+SVC_IP="$(cat /opt/config/dcae_float_ip.txt)"
+REGREQ="
+{
+  \"Name\" : \"${SVC_NAME}\",
+  \"ID\" : \"${SVC_NAME}\",
+  \"Address\": \"${SVC_IP}\",
+  \"Port\": 2376,
+  \"Check\" : {
+    \"Name\" : \"${SVC_NAME}_health\",
+    \"Interval\" : \"15s\",
+    \"HTTP\" : \"http://${SVC_IP}:2376/containers/registrator/json\",
+    \"Status\" : \"passing\"
+  }
+}
+"
+curl -v -X PUT -H 'Content-Type: application/json' \
+--data-binary "$REGREQ" \
+"http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register"
+
+#Add KV for dockerplugin login
+REGREQ="
+[
+  {
+    \"username\": \"docker\",
+    \"password\": \"docker\",
+    \"registry\": \"nexus3.onap.org:10001\"
+  }
+]
+"
+curl -v -X PUT -H 'Content-Type: application/json' \
+--data-binary "$REGREQ" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/docker_plugin/docker_logins"
+
+
+# registering deployment handler
+SVC_NAME="deployment_handler"
+SVC_IP="$(cat /opt/config/dcae_ip_addr.txt)"
+REGREQ="
+{
+  \"Name\" : \"${SVC_NAME}\",
+  \"ID\" : \"${SVC_NAME}\",
+  \"Address\": \"${SVC_IP}\",
+  \"Port\": 8188,
+  \"Check\" : {
+    \"Name\" : \"${SVC_NAME}_health\",
+    \"Interval\" : \"15s\",
+    \"HTTP\" : \"https://${SVC_IP}:8188/\",
+    \"tls_skip_verify\": true,
+    \"Status\" : \"passing\"
+  }
+}
+"
+curl -v -X PUT -H 'Content-Type: application/json' \
+--data-binary \
+"$REGREQ" "http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register"
+
+
+# registering Holmes services
+SVC_NAME="${SRVCNAME_MVP_HR}"
+SVC_IP="$(cat /opt/config/dcae_ip_addr.txt)"
+REGREQ="
+{
+  \"Name\" : \"${SVC_NAME}\",
+  \"ID\" : \"${SVC_NAME}\",
+  \"Address\": \"${SVC_IP}\",
+  \"Port\": 9101,
+  \"Check\" : {
+    \"Name\" : \"${SVC_NAME}_health\",
+    \"Interval\" : \"15s\",
+    \"HTTP\" : \"https://${SVC_IP}:9101/api/holmes-rule-mgmt/v1/healthcheck\",
+    \"tls_skip_verify\": true,
+    \"Status\" : \"passing\"
+  }
+}
+"
+curl -v -X PUT -H 'Content-Type: application/json' \
+--data-binary \
+"$REGREQ" "http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register"
+
+
+SVC_NAME="${SRVCNAME_MVP_HE}"
+SVC_IP="$(cat /opt/config/dcae_ip_addr.txt)"
+REGREQ="
+{
+  \"Name\" : \"${SVC_NAME}\",
+  \"ID\" : \"${SVC_NAME}\",
+  \"Address\": \"${SVC_IP}\",
+  \"Port\": 9102,
+  \"Check\" : {
+    \"Name\" : \"${SVC_NAME}_health\",
+    \"Interval\" : \"15s\",
+    \"HTTP\" : \"https://${SVC_IP}:9102/api/holmes-engine-mgmt/v1/healthcheck\",
+    \"tls_skip_verify\": true,
+    \"Status\" : \"passing\"
+  }
+}
+"
+curl -v -X PUT -H 'Content-Type: application/json' \
+--data-binary "$REGREQ" \
+"http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register"
+
+
+
+# now push KVs
+# generated with https://www.browserling.com/tools/json-escape
+# config binding service
+REGKV="
+{}
+"
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+http://${HOSTNAME_CONSUL}:8500/v1/kv/config_binding_service
+# checked
+
+
+
+# inventory
+REGKV='
+{
+  "database": {
+    "checkConnectionWhileIdle": false,
+    "driverClass": "org.postgresql.Driver",
+    "evictionInterval": "10s",
+    "initialSize": 2,
+    "maxSize": 8,
+    "maxWaitForConnection": "1s",
+    "minIdleTime": "1 minute",
+    "minSize": 2,
+    "password": "inventorypwd",
+    "properties": {
+      "charSet": "UTF-8"},
+    "url": "jdbc:postgresql://pgInventory:5432/postgres",
+    "user": "inventory",
+    "validationQuery": "/* MyService Health Check */ SELECT 1"
+  },
+  "databusControllerConnection": {
+    "host": "databus-controller-hostname",
+    "mechId": null,
+    "password": null,
+    "port": 8443,
+    "required": false},
+  "httpClient": {
+    "connectionTimeout": "5000milliseconds",
+    "gzipEnabled": false,
+    "gzipEnabledForRequests": false,
+    "maxThreads": 128,
+    "minThreads": 1,
+    "timeout": "5000milliseconds"
+  }
+ }
+ }
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+http://${HOSTNAME_CONSUL}:8500/v1/kv/inventory
+# checked
+
+
+# policy handler
+REGKV='
+{
+  "policy_handler": {
+    "deploy_handler": {
+      "target_entity": "deployment_handler",
+      "tls_ca_mode": "do_not_verify",
+      "max_msg_length_mb": 5,
+      "url" : "https://{{ dcae_ip_addr }}:8188",
+      "tls_ca_mode" : "cert_directory",
+      "query": {
+        "cfy_tenant_name": "default_tenant"
+      }
+    },
+    "thread_pool_size": 4,
+    "policy_retry_count": 5,
+    "pool_connections": 20,
+    "policy_retry_sleep": 5,
+    "catch_up": {
+      "interval": 1200
+    },
+    "reconfigure": {
+      "interval": 600
+    },
+    "policy_engine": {
+      "path_decision": "/decision/v1",
+      "path_api": "/pdp/api/",
+      "path_notifications" : "/pdp/notifications",
+      "tls_ca_mode" : "cert_directory",
+      "tls_wss_ca_mode" : "cert_directory",
+      "headers": {
+        "Environment": "TEST",
+        "ClientAuth": "cHl0aG9uOnRlc3Q=",
+        "Content-Type": "application/json",
+        "Accept": "application/json",
+        "Authorization": "Basic dGVzdHBkcDphbHBoYTEyMw=="
+      },
+      "url": "https://{{ policy_ip_addr }}:8081",
+      "target_entity": "policy_engine"
+    }
+  }
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/policy_handler"
+
+
+# service change handler
+REGKV='
+{
+  "asdcDistributionClient": {
+    "asdcAddress": "{{ sdc_ip_addr }}:8443",
+    "asdcUri": "https://{{ sdc_ip_addr }}:8443",
+    "msgBusAddress": "{{ mr_ip_addr }}",
+    "user": "dcae",
+    "password": "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U",
+    "pollingInterval": 20,
+    "pollingTimeout": 20,
+    "consumerGroup": "dcae",
+    "consumerId": "dcae-sch",
+    "environmentName": "AUTO",
+    "keyStorePath": null,
+    "keyStorePassword": null,
+    "activateServerTLSAuth": false,
+    "useHttpsWithDmaap": false,
+    "isFilterInEmptyResources": false
+  },
+  "dcaeInventoryClient": {
+    "uri": "http://inventory:8080"
+  }
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/service-change-handler"
+
+
+# deployment handler
+REGKV='
+{
+  "logLevel": "DEBUG",
+  "cloudify": {
+    "protocol": "http"
+  },
+  "inventory": {
+    "protocol": "http"
+  }
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/deployment_handler"
+
+
+# ves
+MR_IP="$(cat /opt/config/mr_ip_addr.txt)"
+REGKV='
+{
+  "event.transform.flag": "0",
+  "tomcat.maxthreads": "200",
+  "collector.schema.checkflag": "1",
+  "collector.dmaap.streamid": "fault=ves_fault|syslog=ves_syslog|heartbeat=ves_heartbeat|measurementsForVfScaling=ves_measurement|mobileFlow=ves_mobileflow|other=ves_other|stateChange=ves_statechange|thresholdCrossingAlert=ves_thresholdCrossingAlert|voiceQuality=ves_voicequality|sipSignaling=ves_sipsignaling",
+  "collector.service.port": "8080",
+  "collector.schema.file": "{\"v1\":\"./etc/CommonEventFormat_27.2.json\",\"v2\":\"./etc/CommonEventFormat_27.2.json\",\"v3\":\"./etc/CommonEventFormat_27.2.json\",\"v4\":\"./etc/CommonEventFormat_27.2.json\",\"v5\":\"./etc/CommonEventFormat_28.4.1.json\"}",
+  "collector.keystore.passwordfile": "/opt/app/VESCollector/etc/passwordfile",
+  "collector.inputQueue.maxPending": "8096",
+  "streams_publishes": {
+    "ves_measurement": {
+      "type": "message_router",
+      "dmaap_info": {
+        "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.VES_MEASUREMENT_OUTPUT/"
+      }
+    },
+    "ves_fault": {
+      "type": "message_router",
+      "dmaap_info": {
+        "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.SEC_FAULT_OUTPUT/"
+      }
+    }
+  },
+  "collector.service.secure.port": "8443",
+  "header.authflag": "0",
+  "collector.keystore.file.location": "/opt/app/VESCollector/etc/keystore",
+  "collector.keystore.alias": "dynamically generated",
+  "services_calls": [],
+  "header.authlist": "userid1,base64encodepwd1|userid2,base64encodepwd2"
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcaegen2-collectors-ves"
+
+
+# holmes rule management
+MSB_IP="$(cat /opt/config/msb_ip_addr.txt)"
+REGKV="
+{
+  \"streams_subscribes\": {},
+  \"msb.hostname\": \"${MSB_IP_ADDR}\",
+  \"msb.uri\": \"/api/microservices/v1/services\",
+  \"streams_publishes\": {},
+  \"holmes.default.rule.volte.scenario1\": \"ControlLoop-VOLTE-2179b738-fd36-4843-a71a-a8c24c70c55b\$\$\$package org.onap.holmes.droolsRule;\\n\\nimport org.onap.holmes.common.dmaap.DmaapService;\\nimport org.onap.holmes.common.api.stat.VesAlarm;\\nimport org.onap.holmes.common.aai.CorrelationUtil;\\nimport org.onap.holmes.common.dmaap.entity.PolicyMsg;\\nimport org.onap.holmes.common.dropwizard.ioc.utils.ServiceLocatorHolder;\\nimport org.onap.holmes.common.utils.DroolsLog;\\n \\n\\nrule \\\"Relation_analysis_Rule\\\"\\nsalience 200\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 0,\\n \$sourceId: sourceId, sourceId != null && !sourceId.equals(\\\"\\\"),\\n\\t\\t\\t\$sourceName: sourceName, sourceName \!= null \&\& \!sourceName.equals(\\\"\\\"),\\n\\t\\t\\t\$startEpochMicrosec: startEpochMicrosec,\\n eventName in (\\\"Fault_MultiCloud_VMFailure\\\"),\\n \$eventId: eventId)\\n \$child : VesAlarm( eventId \!= $eventId, parentId == null,\\n CorrelationUtil.getInstance().isTopologicallyRelated(sourceId, \$sourceId, \$sourceName),\\n eventName in (\\\"Fault_MME_eNodeB out of service alarm\\\"),\\n startEpochMicrosec \< \$startEpochMicrosec + 60000 \&\& startEpochMicrosec \> \$startEpochMicrosec - 60000 )\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"Relation_analysis_Rule: rootId=\\\" + \$root.getEventId() + \\\", childId=\\\" + \$child.getEventId());\\n\\t\\t\$child.setParentId(\$root.getEventId());\\n\\t\\tupdate(\$child);\\n\\t\\t\\nend\\n\\nrule \\\"root_has_child_handle_Rule\\\"\\nsalience 150\\nno-loop true\\n\\twhen\\n\\t\\t\$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0, \$eventId: eventId)\\n\\t\\t\$child : VesAlarm(eventId \!= $eventId, parentId == $eventId)\\n\\tthen\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_has_child_handle_Rule: rootId=\\\" + \$root.getEventId() + \\\", childId=\\\" + $child.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, \$child, \\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\t\$root.setRootFlag(1);\\n\\t\\tupdate(\$root);\\nend\\n\\nrule \\\"root_no_child_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0,\\n sourceId \!= null \&\& \!sourceId.equals(\\\"\\\"),\\n\\t\\t\\tsourceName \!= null \&\& \!sourceName.equals(\\\"\\\"),\\n eventName in (\\\"Fault_MultiCloud_VMFailure\\\"))\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_no_child_handle_Rule: rootId=\\\" + \$root.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, null, \\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\t$root.setRootFlag(1);\\n\\t\\tupdate(\$root);\\nend\\n\\nrule \\\"root_cleared_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 1, rootFlag == 1)\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_cleared_handle_Rule: rootId=\\\" + \$root.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, null, \\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\tretract(\$root);\\nend\\n\\nrule \\\"child_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$child : VesAlarm(alarmIsCleared == 1, rootFlag == 0)\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"child_handle_Rule: childId=\\\" + \$child.getEventId());\\n\\t\\tretract(\$child);\\nend\",
+  \"services_calls\": {}
+}"
+
+
+
+REGKV='
+{
+  "streams_subscribes": {},
+  "msb.hostname": "{{ msb_ip_addr }}",
+  "msb.uri": "/api/microservices/v1/services",
+  "streams_publishes": {},
+  "holmes.default.rule.volte.scenario1": "ControlLoop-VOLTE-2179b738-fd36-4843-a71a-a8c24c70c55b$$$package org.onap.holmes.droolsRule;\n\nimport org.onap.holmes.common.dmaap.DmaapService;\nimport org.onap.holmes.common.api.stat.VesAlarm;\nimport org.onap.holmes.common.aai.CorrelationUtil;\nimport org.onap.holmes.common.dmaap.entity.PolicyMsg;\nimport org.onap.holmes.common.dropwizard.ioc.utils.ServiceLocatorHolder;\nimport org.onap.holmes.common.utils.DroolsLog;\n \n\nrule \"Relation_analysis_Rule\"\nsalience 200\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 0,\n $sourceId: sourceId, sourceId != null && !sourceId.equals(\"\"),\n\t\t\t$sourceName: sourceName, sourceName != null && !sourceName.equals(\"\"),\n\t\t\t$startEpochMicrosec: startEpochMicrosec,\n eventName in (\"Fault_MultiCloud_VMFailure\"),\n $eventId: eventId)\n $child : VesAlarm( eventId != $eventId, parentId == null,\n CorrelationUtil.getInstance().isTopologicallyRelated(sourceId, $sourceId, $sourceName),\n eventName in (\"Fault_MME_eNodeB out of service alarm\"),\n startEpochMicrosec < $startEpochMicrosec + 60000 && startEpochMicrosec > $startEpochMicrosec - 60000 )\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"Relation_analysis_Rule: rootId=\" + $root.getEventId() + \", childId=\" + $child.getEventId());\n\t\t$child.setParentId($root.getEventId());\n\t\tupdate($child);\n\t\t\nend\n\nrule \"root_has_child_handle_Rule\"\nsalience 150\nno-loop true\n\twhen\n\t\t$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0, $eventId: eventId)\n\t\t$child : VesAlarm(eventId != $eventId, parentId == $eventId)\n\tthen\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_has_child_handle_Rule: rootId=\" + $root.getEventId() + \", childId=\" + $child.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = dmaapService.getPolicyMsg($root, $child, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\t$root.setRootFlag(1);\n\t\tupdate($root);\nend\n\nrule \"root_no_child_handle_Rule\"\nsalience 100\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 0, rootFlag == 0,\n sourceId != null && !sourceId.equals(\"\"),\n\t\t\tsourceName != null && !sourceName.equals(\"\"),\n eventName in (\"Fault_MultiCloud_VMFailure\"))\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_no_child_handle_Rule: rootId=\" + $root.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = dmaapService.getPolicyMsg($root, null, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\t$root.setRootFlag(1);\n\t\tupdate($root);\nend\n\nrule \"root_cleared_handle_Rule\"\nsalience 100\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 1, rootFlag == 1)\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_cleared_handle_Rule: rootId=\" + $root.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = dmaapService.getPolicyMsg($root, null, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\tretract($root);\nend\n\nrule \"child_handle_Rule\"\nsalience 100\nno-loop true\n when\n $child : VesAlarm(alarmIsCleared == 1, rootFlag == 0)\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"child_handle_Rule: childId=\" + $child.getEventId());\n\t\tretract($child);\nend",
+  "services_calls": {}
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcae-analytics-holmes-rule-management"
+
+
+
+# Holmes engine management
+REGKV='
+{
+  "msb.hostname": "10.0.14.1",
+  "services_calls": {},
+  "msb.uri": "/api/microservices/v1/services",
+  "streams_publishes": {
+    "dcae_cl_out": {
+      "type": "message_router",
+      "dmaap_info": {
+        "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.DCAE_CL_OUTPUT"
+      }
+    }
+  },
+  "streams_subscribes": {
+    "ves_fault": {
+      "type": "message_router",
+      "dmaap_info": {
+        "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.SEC_FAULT_OUTPUT"
+      }
+    }
+  }
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcae-analytics-holmes-engine-management"
+
+
+#curl http://localhost:8500/v1/kv/config_binding_service |jq .[0].Value |sed -e 's/\"//g' |base64 --decode
+
+
+
+# TCA
+REGKV='
+{
+  "thresholdCalculatorFlowletInstances": "2",
+  "tcaVESMessageStatusTableTTLSeconds": "86400",
+  "tcaVESMessageStatusTableName": "TCAVESMessageStatusTable",
+  "tcaVESAlertsTableTTLSeconds": "1728000",
+  "tcaVESAlertsTableName": "TCAVESAlertsTable",
+  "tcaSubscriberOutputStreamName": "TCASubscriberOutputStream",
+  "tcaAlertsAbatementTableTTLSeconds": "1728000",
+  "tcaAlertsAbatementTableName": "TCAAlertsAbatementTable",
+  "streams_subscribes": {},
+  "streams_publishes": {},
+  "services_calls": {},
+  "appName": "dcae-tca",
+  "appDescription": "DCAE Analytics Threshold Crossing Alert Application"
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcaegen2-analytics-tca"
+
+
+# TCA pref
+REGKV='{
+  "tca_policy": "{\"domain\":\"measurementsForVfScaling\",\"metricsPerEventName\":[{\"eventName\":\"vFirewallBroadcastPackets\",\"controlLoopSchemaType\":\"VNF\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":300,\"direction\":\"LESS_OR_EQUAL\",\"severity\":\"MAJOR\",\"closedLoopEventStatus\":\"ONSET\"},{\"closedLoopControlName\":\"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":700,\"direction\":\"GREATER_OR_EQUAL\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]},{\"eventName\":\"vLoadBalancer\",\"controlLoopSchemaType\":\"VM\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":300,\"direction\":\"GREATER_OR_EQUAL\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]},{\"eventName\":\"Measurement_vGMUX\",\"controlLoopSchemaType\":\"VNF\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\",\"thresholdValue\":0,\"direction\":\"EQUAL\",\"severity\":\"MAJOR\",\"closedLoopEventStatus\":\"ABATED\"},{\"closedLoopControlName\":\"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\",\"thresholdValue\":0,\"direction\":\"GREATER\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]}]}",
+  "subscriberTopicName": "unauthenticated.VES_MEASUREMENT_OUTPUT",
+  "subscriberTimeoutMS": "-1",
+  "subscriberProtocol": "http",
+  "subscriberPollingInterval": "30000",
+  "subscriberMessageLimit": "-1",
+  "subscriberHostPort": "3904",
+  "subscriberHostName":"{{ mr_ip_addr }}",
+  "subscriberContentType": "application/json",
+  "subscriberConsumerId": "c12",
+  "subscriberConsumerGroup": "OpenDCAE-c12",
+  "publisherTopicName": "unauthenticated.DCAE_CL_OUTPUT",
+  "publisherProtocol": "http",
+  "publisherPollingInterval": "20000",
+  "publisherMaxRecoveryQueueSize": "100000",
+  "publisherMaxBatchSize": "1",
+  "publisherHostPort": "3904",
+  "publisherHostName": "{{ mr_ip_addr }}",
+  "publisherContentType": "application/json",
+  "enableAlertCEFFormat": "false",
+  "enableAAIEnrichment": true,
+  "aaiVNFEnrichmentAPIPath": "/aai/v11/network/generic-vnfs/generic-vnf",
+  "aaiVMEnrichmentAPIPath": "/aai/v11/search/nodes-query",
+  "aaiEnrichmentUserPassword": "DCAE",
+  "aaiEnrichmentUserName": "DCAE",
+  "aaiEnrichmentProtocol": "https",
+  "aaiEnrichmentPortNumber": "8443",
+  "aaiEnrichmentIgnoreSSLCertificateErrors": "true",
+  "aaiEnrichmentHost":"{{ aai1_ip_addr }}",
+  "enableRedisCaching":false
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcaegen2-analytics-tca:preferences"
+
+
+
+# SNMP Trap Collector
+SERVICENAME="${SRVCNAME_STATIC_SNMPTRAP}"
+REGKV='{
+  "files": {
+    "roll_frequency": "day",
+    "data_dir": "data",
+    "arriving_traps_log": "snmptrapd_arriving_traps.log",
+    "minimum_severity_to_log": 2,
+    "traps_stats_log": "snmptrapd_stats.csv",
+    "perm_status_file": "snmptrapd_status.log",
+    "pid_dir": "tmp",
+    "eelf_audit": "audit.log",
+    "log_dir": "logs",
+    "eelf_metrics": "metrics.log",
+    "eelf_base_dir": "/opt/app/snmptrap/logs",
+    "runtime_base_dir": "/opt/app/snmptrap",
+    "eelf_error": "error.log",
+    "eelf_debug": "debug.log",
+    "snmptrapd_diag": "snmptrapd_prog_diag.log"
+  },
+  "publisher": {
+    "http_milliseconds_between_retries": 750,
+    "max_milliseconds_between_publishes": 10000,
+    "max_traps_between_publishes": 10,
+    "http_retries": 3,
+    "http_primary_publisher": "true",
+    "http_milliseconds_timeout": 1500,
+    "http_peer_publisher": "unavailable"
+  },
+  "snmptrapd": {
+    "version": "1.4.0",
+    "title": "Collector for receiving SNMP traps and publishing to DMAAP/MR"
+  },
+  "cache": {
+    "dns_cache_ttl_seconds": 60
+  },
+  "sw_interval_in_seconds": 60,
+  "streams_publishes": {
+    "sec_fault_unsecure": {
+      "type": "message_router",
+      "dmaap_info": {
+        "topic_url": "http://{{ mr_ip_addr }}:3904/events/unauthenticated.ONAP-COLLECTOR-SNMPTRAP"
+      }
+    }
+  },
+  "StormWatchPolicy": "",
+  "services_calls": {},
+  "protocols": {
+    "ipv4_interface": "0.0.0.0",
+    "ipv4_port": 6162,
+    "ipv6_interface": "::1",
+    "ipv6_port": 6162
+  }
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}"
+
+
+
+# hv-ves collector
+SERVICENAME="${SRVCNAME_STATIC_HVVES}"
+REGKV='{
+  "dmaap.kafkaBootstrapServers": "{{ mr_ip_addr }}:9092",
+  "collector.routing": {
+    "fromDomain": "HVMEAS",
+    "toTopic": "HV_VES_MEASUREMENTS"
+  }
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}"
+
+
+# data file collector
+SERVICENAME="${SRVCNAME_STATIC_DFC}"
+REGKV='{
+  "dmaap.dmaapConsumerConfiguration.dmaapHostName": "{{ mr_ip_addr }}",
+  "dmaap.dmaapConsumerConfiguration.dmaapPortNumber": 2222,
+  "dmaap.dmaapConsumerConfiguration.dmaapTopicName": "/events/unauthenticated.VES_NOTIFICATION_OUTPUT",
+  "dmaap.dmaapConsumerConfiguration.dmaapProtocol": "http",
+  "dmaap.dmaapConsumerConfiguration.dmaapUserName": "",
+  "dmaap.dmaapConsumerConfiguration.dmaapUserPassword": "",
+  "dmaap.dmaapConsumerConfiguration.dmaapContentType": "application/json",
+  "dmaap.dmaapConsumerConfiguration.consumerId": "C12",
+  "dmaap.dmaapConsumerConfiguration.consumerGroup": "OpenDcae-c12",
+  "dmaap.dmaapConsumerConfiguration.timeoutMs": -1,
+  "dmaap.dmaapConsumerConfiguration.messageLimit": 1,
+  "dmaap.dmaapProducerConfiguration.dmaapHostName": "{{ mr_ip_addr }}",
+  "dmaap.dmaapProducerConfiguration.dmaapPortNumber": 3907,
+  "dmaap.dmaapProducerConfiguration.dmaapTopicName": "publish",
+  "dmaap.dmaapProducerConfiguration.dmaapProtocol": "https",
+  "dmaap.dmaapProducerConfiguration.dmaapUserName": "dradmin",
+  "dmaap.dmaapProducerConfiguration.dmaapUserPassword": "dradmin",
+  "dmaap.dmaapProducerConfiguration.dmaapContentType": "application/octet-stream",
+  "ftp.ftpesConfiguration.keyCert": "config/ftpKey.jks",
+  "ftp.ftpesConfiguration.keyPassword": "secret",
+  "ftp.ftpesConfiguration.trustedCA": "config/cacerts",
+  "ftp.ftpesConfiguration.trustedCAPassword": "secret"
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}"
+
+
+# PNF Registration Handler
+SERVICENAME="${SRVCNAME_STATIC_PRH}"
+REGKV='{
+  "dmaap.dmaapProducerConfiguration.dmaapTopicName": "/events/unauthenticated.PNF_READY",
+  "dmaap.dmaapConsumerConfiguration.dmaapHostName": "{{ mr_ip_addr }}",
+  "aai.aaiClientConfiguration.aaiPnfPath": "/network/pnfs/pnf",
+  "aai.aaiClientConfiguration.aaiUserPassword": "AAI",
+  "dmaap.dmaapConsumerConfiguration.dmaapUserName": "admin",
+  "aai.aaiClientConfiguration.aaiBasePath": "/aai/v12",
+  "dmaap.dmaapConsumerConfiguration.timeoutMs": -1,
+  "dmaap.dmaapProducerConfiguration.dmaapPortNumber": 3904,
+  "aai.aaiClientConfiguration.aaiHost": "{{ aai1_ip_addr }}",
+  "dmaap.dmaapConsumerConfiguration.dmaapUserPassword": "admin",
+  "dmaap.dmaapProducerConfiguration.dmaapProtocol": "http",
+  "aai.aaiClientConfiguration.aaiIgnoreSslCertificateErrors": true,
+  "dmaap.dmaapProducerConfiguration.dmaapContentType": "application/json",
+  "dmaap.dmaapConsumerConfiguration.dmaapTopicName": "/events/unauthenticated.VES_PNFREG_OUTPUT",
+  "dmaap.dmaapConsumerConfiguration.dmaapPortNumber": 3904,
+  "dmaap.dmaapConsumerConfiguration.dmaapContentType": "application/json",
+  "dmaap.dmaapConsumerConfiguration.messageLimit": -1,
+  "dmaap.dmaapConsumerConfiguration.dmaapProtocol": "http",
+  "aai.aaiClientConfiguration.aaiUserName": "AAI",
+  "dmaap.dmaapConsumerConfiguration.consumerId": "c12",
+  "dmaap.dmaapProducerConfiguration.dmaapHostName": "{{ mr_ip_addr }}",
+  "aai.aaiClientConfiguration.aaiHostPortNumber": 8443,
+  "dmaap.dmaapConsumerConfiguration.consumerGroup": "OpenDCAE-c12",
+  "aai.aaiClientConfiguration.aaiProtocol": "https",
+  "dmaap.dmaapProducerConfiguration.dmaapUserName": "admin",
+  "dmaap.dmaapProducerConfiguration.dmaapUserPassword": "admin"
+}'
+curl -v -X PUT -H "Content-Type: application/json" \
+--data "${REGKV}" \
+"http://${HOSTNAME_CONSUL}:8500/v1/kv/${SERVICENAME}"
diff --git a/archive/heat/setup.sh b/archive/heat/setup.sh
new file mode 100755
index 0000000..b95e56e
--- /dev/null
+++ b/archive/heat/setup.sh
@@ -0,0 +1,142 @@
+#!/bin/bash
+#############################################################################
+#
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#############################################################################
+
+
+NETWORK="config_default"
+
+echo "Cleaning up any previously deployed cludify manager and registrator"
+docker stop registrator cloudify-manager
+docker rm registrator cloudify-manager
+
+echo "Launching registrator on dockerhost"
+docker run -d \
+--network=${NETWORK} \
+--name=registrator \
+-e EXTERNAL_IP={{ dcae_ip_addr }} \
+-e CONSUL_HOST=consul \
+-v /var/run/docker.sock:/tmp/docker.sock \
+onapdcae/registrator:v7
+
+
+
+
+rm -rf scripts-in-container
+mkdir scripts-in-container
+cat > scripts-in-container/install-plugins.sh << EOL
+#!/bin/bash
+source /cfy42/bin/activate
+pip install pip==9.0.3
+cfy profiles use 127.0.0.1 -u admin -p admin -t default_tenant
+cfy status
+cd /tmp/bin
+./build-plugins.sh https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R4 https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases
+for wagon in ./wagons/*.wgn; do cfy plugins upload \$wagon ; done
+deactivate
+EOL
+
+#wget -O scripts-in-container/build-plugins.sh https://git.onap.org/dcaegen2/deployments/plain/k8s-bootstrap-container/build-plugins.sh
+cat > scripts-in-container/build-plugins.sh << EOL
+#!/bin/bash
+
+# Pull plugin archives from repos
+# Build wagons
+# $1 is the DCAE repo URL
+# $2 is the CCSDK repo URL
+# (This script runs at Docker image build time)
+#
+set -x
+DEST=wagons
+
+# For DCAE, we get zips of the archives and build wagons
+DCAEPLUGINFILES=\
+"\
+relationshipplugin/1.0.0/relationshipplugin-1.0.0.tgz
+dcaepolicyplugin/2.3.0/dcaepolicyplugin-2.3.0.tgz
+dockerplugin/3.2.1/dockerplugin-3.2.1.tgz \
+"
+
+# For CCSDK, we pull down the wagon files directly
+CCSDKPLUGINFILES=\
+"\
+plugins/pgaas-1.1.0-py27-none-any.wgn
+plugins/sshkeyshare-1.0.0-py27-none-any.wgn
+"
+
+# Build a set of wagon files from archives in a repo
+# $1 -- repo base URL
+# $2 -- list of paths to archive files in the repo
+function build {
+    for plugin in $2
+    do
+        # Could just do wagon create with the archive URL as source,
+        # but can't use a requirements file with that approach
+        mkdir work
+        target=$(basename ${plugin})
+        curl -Ss $1/${plugin} > ${target}
+        tar zxvf ${target} --strip-components=2 -C work
+        wagon create -t tar.gz -o ${DEST} -r work/requirements.txt --validate ./work
+        rm -rf work
+    done
+}
+
+# Copy a set of wagons from a repo
+# $1 -- repo baseURL
+# $2 -- list of paths to wagons in the repo
+function get_wagons {
+    for wagon in $2
+    do
+        target=$(basename ${wagon})
+        curl -Ss $1/${wagon} > ${DEST}/${target}
+    done
+}
+
+mkdir ${DEST}
+build $1 "${DCAEPLUGINFILES}"
+get_wagons $2 "${CCSDKPLUGINFILES}"
+EOL
+
+chmod 777 scripts-in-container/*
+
+echo "Launching Cloudify Manager container"
+docker run -d \
+--network="${NETWORK}" \
+--name cloudify-manager \
+--restart unless-stopped \
+-v /sys/fs/cgroup:/sys/fs/cgroup:ro \
+-v /opt/app/config/scripts-in-container:/tmp/bin \
+-p 80:80 \
+--tmpfs /run \
+--tmpfs /run/lock \
+--security-opt seccomp:unconfined \
+--cap-add SYS_ADMIN \
+--label "SERVICE_80_NAME=cloudify_manager" \
+--label "SERVICE_80_CHECK_TCP=true" \
+--label "SERVICE_80_CHECK_INTERVAL=15s" \
+--label "SERVICE_80_CHECK_INITIAL_STATUS=passing" \
+{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.cm-container:{{ dcae_docker_cm }}
+
+echo "Cloudify Manager deployed, waiting for completion"
+while ! nc -z localhost 80; do sleep 1; done
+
+echo "Upload plugins to Cloudify Manager"
+
+# run as detached because this script is intended to be run in background
+docker exec -itd cloudify-manager /tmp/bin/install-plugins.sh
+
+echo "Cloudify Manager setup complete"
+
diff --git a/archive/heat/teardown.sh b/archive/heat/teardown.sh
new file mode 100755
index 0000000..19d74a7
--- /dev/null
+++ b/archive/heat/teardown.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+#############################################################################
+#
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#############################################################################
+
+
+cd /opt/app/config
+
+echo "Stop and remove cloudify-manager registrator dcae-health"
+docker stop cloudify-manager registrator dcae-health
+docker rm cloudify-manager registrator dcae-health
+
+echo "Stand down R2PLUS service components"
+/opt/docker/docker-compose -f ./docker-compose-4.yaml down
+echo "Stand down R2 platform components"
+/opt/docker/docker-compose -f ./docker-compose-3.yaml down
+echo "Stand down R2 minimum service components"
+/opt/docker/docker-compose -f ./docker-compose-2.yaml down
+echo "Stand down R2 shared platform components"
+/opt/docker/docker-compose -f ./docker-compose-1.yaml down
+echo "Teardown done"
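
Once register.sh has run, its service registrations and KV writes can be spot-checked against Consul. A sketch, assuming Consul's port 8500 is reachable from the host and that jq is installed (register.sh itself carries a commented-out variant of this check, piping a KV value through jq and base64):

    # services registered directly by register.sh or via Registrator
    curl -s http://consul:8500/v1/agent/services | jq 'keys'
    # one of the KV blobs pushed above; ?raw skips Consul's base64 wrapping
    curl -s http://consul:8500/v1/kv/mvp-dcaegen2-analytics-tca?raw | jq .
    # the config_binding_service key was deliberately set to an empty object
    curl -s http://consul:8500/v1/kv/config_binding_service?raw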