author | Vijay Venkatesh Kumar <vv770d@att.com> | 2018-04-16 14:24:32 +0000
---|---|---
committer | Gerrit Code Review <gerrit@onap.org> | 2018-04-16 14:24:32 +0000
commit | 6a6df9ea6b3c0002acc375b7eefbea8a63ace3f7 (patch) |
tree | a600f088c30e0809166e3c3a886bf4be288ba33f |
parent | 819aa7fe50409266aea42264523be5fa6f0c9dbe (diff) |
parent | 44a941b0e46d8fd8a2f5eeb1ffa3cde9a5b2e140 (diff) |
Merge "Heat deployment files"
-rwxr-xr-x | heat/R2MVP/build-plugins.sh | 73
-rw-r--r-- | heat/R2MVP/docker-compose-1.yaml | 76
-rw-r--r-- | heat/R2MVP/docker-compose-2.yaml | 136
-rw-r--r-- | heat/R2MVP/docker-compose-3.yaml | 82
-rw-r--r-- | heat/R2MVP/docker-compose-4.yaml | 65
-rwxr-xr-x | heat/R2MVP/register.sh | 162
-rwxr-xr-x | heat/R2MVP/setup.sh | 70
7 files changed, 598 insertions, 66 deletions
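To inspect the merge locally rather than in the web view, the commit can be checked out by its hash. A minimal sketch follows; the repository path (dcaegen2/deployments) is an assumption based on the heat/R2MVP file paths and may need adjusting.

```bash
# Hypothetical local checkout of this merge commit; adjust the repo path if
# heat/R2MVP lives in a different ONAP repository.
git clone https://gerrit.onap.org/r/dcaegen2/deployments
cd deployments
git checkout 6a6df9ea6b3c0002acc375b7eefbea8a63ace3f7
git show --stat HEAD
```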
diff --git a/heat/R2MVP/build-plugins.sh b/heat/R2MVP/build-plugins.sh
new file mode 100755
index 0000000..87ec960
--- /dev/null
+++ b/heat/R2MVP/build-plugins.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+# ============LICENSE_START=======================================================
+# org.onap.dcae
+# ================================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+# Pull plugin archives from repos
+# Build wagons
+# $1 is the DCAE repo URL
+# $2 is the CCSDK repo URL
+# (This script runs at Docker image build time)
+#
+set -x
+DEST=wagons
+
+# For DCAE, we get zips of the archives and build wagons
+DCAEPLUGINFILES=\
+"\
+k8splugin/1.0.1/k8splugin-1.0.1.tgz
+dcaepolicyplugin/2.2.1/dcaepolicyplugin-2.2.1.tgz \
+"
+
+# For CCSDK, we pull down the wagon files directly
+CCSDKPLUGINFILES=\
+"\
+plugins/pgaas-1.1.0-py27-none-any.wgn
+plugins/sshkeyshare-1.0.0-py27-none-any.wgn
+"
+
+# Build a set of wagon files from archives in a repo
+# $1 -- repo base URL
+# $2 -- list of paths to archive files in the repo
+function build {
+    for plugin in $2
+    do
+        # Could just do wagon create with the archive URL as source,
+        # but can't use a requirements file with that approach
+        mkdir work
+        target=$(basename ${plugin})
+        curl -Ss $1/${plugin} > ${target}
+        tar zxvf ${target} --strip-components=2 -C work
+        wagon create -t tar.gz -o ${DEST} -r work/requirements.txt --validate ./work
+        rm -rf work
+    done
+}
+
+# Copy a set of wagons from a repo
+# $1 -- repo baseURL
+# $2 -- list of paths to wagons in the repo
+function get_wagons {
+    for wagon in $2
+    do
+        target=$(basename ${wagon})
+        curl -Ss $1/${wagon} > ${DEST}/${target}
+    done
+}
+
+mkdir ${DEST}
+build $1 "${DCAEPLUGINFILES}"
+get_wagons $2 "${CCSDKPLUGINFILES}"
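build-plugins.sh loops over the archive paths in DCAEPLUGINFILES and packages each unpacked source tree with `wagon create`. For reference, a single iteration of the `build` function looks like this when run by hand; the repo URL is the one setup.sh later passes as `$1`, and the output directory matches `DEST`.

```bash
# One hand-run iteration of build() for the k8s plugin (illustration only).
REPO=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R2
mkdir work
curl -Ss ${REPO}/k8splugin/1.0.1/k8splugin-1.0.1.tgz > k8splugin-1.0.1.tgz
tar zxvf k8splugin-1.0.1.tgz --strip-components=2 -C work
wagon create -t tar.gz -o wagons -r work/requirements.txt --validate ./work
rm -rf work
```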
diff --git a/heat/R2MVP/docker-compose-1.yaml b/heat/R2MVP/docker-compose-1.yaml
index 6d5ed67..64ef1ce 100644
--- a/heat/R2MVP/docker-compose-1.yaml
+++ b/heat/R2MVP/docker-compose-1.yaml
@@ -1,33 +1,81 @@
+#############################################################################
+#
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#             http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#############################################################################
 version: '2.1'
 services:
-  db:
-    image: postgres:9.5
-    container_name: "db"
-    hostname: "db"
+  pgHolmes:
+    image: "postgres:9.5"
+    container_name: "pgHolmes"
+    hostname: "phHolmes"
+    environment:
+      - "POSTGRES_USER=holmes"
+      - "POSTGRES_PASSWORD=holmespwd"
     ports:
       - "5432:5432"
+    labels:
+      - "SERVICE_5432_NAME=pgHolmes"
+      - "SERVICE_5432_CHECK_TCP=true"
+      - "SERVICE_5432_CHECK_INTERVAL=15s"
+      - "SERVICE_5432_CHECK_INITIAL_STATUS=passing"
+
+  pgInventory:
+    image: "postgres:9.5"
+    container_name: "pgInventory"
+    hostname: "pgInventory"
     environment:
-      - POSTGRES_USER=holmes
-      - POSTGRES_PASSWORD=holmespwd
+      - "POSTGRES_USER=inventory"
+      - "POSTGRES_PASSWORD=inventorypwd"
+    ports:
+      - "5433:5432"
+    labels:
+      - "SERVICE_5432_NAME=pgInventory"
+      - "SERVICE_5432_CHECK_TCP=true"
+      - "SERVICE_5432_CHECK_INTERVAL=15s"
+      - "SERVICE_5432_CHECK_INITIAL_STATUS=passing"
+
+
   consul:
     image: consul:0.8.3
     container_name: "consul"
     hostname: "consul"
     ports:
-      - "8400:8400"
       - "8500:8500"
-      - "8600:8600"
-    command: "agent -server -bootstrap-expect 1 -client 0.0.0.0"
+    environment:
+      - "DOCKER_HOST=tcp://{{ dcae_ip_addr }}:2376"
+    command: "agent -ui -server -bootstrap-expect 1 -client 0.0.0.0 -log-level trace"
+    labels:
+      - "SERVICE_8500_NAME=consul"
+      - "SERVICE_8500_CHECK_HTTP=/v1/agent/services"
+      - "SERVICE_8500_CHECK_INTERVAL=15s"
+      - "SERVICE_8500_CHECK_INITIAL_STATUS=passing"
+
-  config_binding_service:
-    image: {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.configbinding:v1.2.0
+  config-binding-service:
+    image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.platform.configbinding:{{ dcae_docker_cbs }}"
     container_name: "config_binding_service"
-    hostname: "config_binding_service"
+    hostname: "config-binding-service"
     environment:
-      - CONSUL_HOST="consul"
+      - "CONSUL_HOST=consul"
     ports:
       - "10000:10000"
     depends_on:
-      - consul
+      - "consul"
+    labels:
+      - "SERVICE_10000_NAME=config_binding_service"
+      - "SERVICE_10000_CHECK_HTTP=/healthcheck"
+      - "SERVICE_10000_CHECK_INTERVAL=15s"
+      - "SERVICE_10000_CHECK_INITIAL_STATUS=passing"
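docker-compose-1.yaml now starts two Postgres instances (one for Holmes, one for Inventory), Consul with the UI enabled, and the config binding service, with Registrator labels describing their health checks. A minimal sketch of a post-start sanity check, assuming the file is brought up with docker-compose on the DCAE VM:

```bash
# Sketch: bring up the first compose file and probe the endpoints named in the
# SERVICE_*_CHECK_HTTP labels. {{ dcae_ip_addr }} is filled in by the Heat template.
docker-compose -f docker-compose-1.yaml up -d
DCAE_IP={{ dcae_ip_addr }}
curl http://${DCAE_IP}:8500/v1/agent/services     # Consul check path
curl http://${DCAE_IP}:10000/healthcheck          # config binding service check path
```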
diff --git a/heat/R2MVP/docker-compose-2.yaml b/heat/R2MVP/docker-compose-2.yaml
index 707cc54..35c9466 100644
--- a/heat/R2MVP/docker-compose-2.yaml
+++ b/heat/R2MVP/docker-compose-2.yaml
@@ -1,70 +1,102 @@
+#############################################################################
+#
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#             http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#############################################################################
 version: '2.1'
 services:
-  ves:
-    image: {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.collectors.ves.vescollector:1.2.0
-    container_name: "ves"
-    hostname: "ves"
+
+  mvp-dcaegen2-collectors-ves:
+    image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.collectors.ves.vescollector:{{ dcae_docker_ves }}"
+    container_name: "mvp-dcaegen2-collectors-ves"
+    hostname: "mvp-dcaegen2-collectors-ves"
     environment:
-      - DMAAPHOST={{ mr_ip_addr }}
-      - CONSUL_HOST=consul
-      - CONSUL_PORT=8500
-      - CONFIG_BINDING_SERVICE=config_binding_service
-      - SERVICE_NAME=ves
-      - HOSTNAME=ves
+      - "DMAAPHOST={{ mr_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
+      - "SERVICE_NAME=mvp-dcaegen2-collectors-ves"
+      - "HOSTNAME=mvp-dcaegen2-collectors-ves"
     ports:
-      - "8080:8080"
-      - "8443:8443"
+      - "8081:8080"
+    labels:
+      - "SERVICE_8080_NAME=mvp-dcaegen2-collectors-ves"
+      - "SERVICE_8080_CHECK_HTTP=/healthcheck"
+      - "SERVICE_8080_CHECK_INTERVAL=15s"
+      - "SERVICE_8080_CHECK_INITIAL_STATUS=passing"
+
-  tca:
-    image: {{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.0.0
-    container_name: tca
-    hostname: tca
+  mvp-dcaegen2-analytics-tca:
+    image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.deployments.tca-cdap-container:{{ dcae_docker_tca }}"
+    container_name: "mvp-dcaegen2-analytics-tca"
+    hostname: "mvp-dcaegen2-analytics-tca"
     environment:
-      - DMAAPHOST={{ mr_ip_addr }}
-      - DMAAPPORT=3904
-      - DMAAPPUBTOPIC=unauthenticated.DCAE_CL_OUTPUT
-      - DMAAPSUBTOPIC=unauthenticated.SEC_MEASUREMENT_OUTPUT
-      - AAIHOST={{ aai1_ip_addr }}
-      - AAIPORT=8443
-      - CONSUL_HOST=consul
-      - CONSUL_PORT=8500
-      - CBS_HOST=cbs
-      - CBS_PORT=10000
-      - SERVICE_NAME=tca
-      - HOSTNAME=tca
-      - CONFIG_BINDING_SERVICE=config_binding_service
+      - "DMAAPHOST={{ mr_ip_addr }}"
+      - "DMAAPPORT=3904"
+      - "DMAAPPUBTOPIC=unauthenticated.DCAE_CL_OUTPUT"
+      - "DMAAPSUBTOPIC=unauthenticated.SEC_MEASUREMENT_OUTPUT"
+      - "AAIHOST={{ aai1_ip_addr }}"
+      - "AAIPORT=8443"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "CBS_HOST=config-binding-service"
+      - "CBS_PORT=10000"
+      - "SERVICE_NAME=mvp-dcaegen2-analytics-tca"
+      - "HOSTNAME=mvp-dcaegen2-analytics-tca"
+      - "CONFIG_BINDING_SERVICE=config_binding_service"
       # set the parameter below to enable REDIS caching.
       #- REDISHOSTPORT=redis-cluster:6379
     ports:
       - "11011:11011"
-      - "11015:11015"
+      #- "11015:11015"
+    labels:
+      - "SERVICE_11011_NAME=mvp-dcaegen2-analytics-tca"
+      - "SERVICE_11011_CHECK_HTTP=/cdap/ns/cdap_tca_hi_lo"
+      - "SERVICE_11011_CHECK_INTERVAL=15s"
+      - "SERVICE_11011_CHECK_INITIAL_STATUS=passing"

-  holmes-engine:
-    image: {{ nexus_docker_repo }}/onap/holmes/engine-management:latest
-    container_name: he
-    hostname: he
+  mvp-dcae-analytics-holmes-engine-management:
+    image: "{{ nexus_docker_repo}}/onap/holmes/engine-management:{{ holmes_docker_em }}"
+    container_name: "mvp-dcae-analytics-holmes-engine-management"
+    hostname: "mvp-dcae-analytics-holmes-engine-management"
     environment:
-      - URL_JDBC=db:5432
-      - JDBC_USERNAME=holmes
-      - JDBC_PASSWORD=holmespwd
-      - MSB_ADDR={{ msb_ip_addr }}
-      - CONSUL_HOST=consul
-      - CONSUL_PORT=8500
-      - HOSTNAME=he
+      - "URL_JDBC=pgHolmes:5432"
+      - "JDBC_USERNAME=holmes"
+      - "JDBC_PASSWORD=holmespwd"
+      - "MSB_ADDR={{ msb_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "HOSTNAME=mvp-dcae-analytics-holmes-engine-management"
     ports:
       - "9102:9102"
+    labels:
+      - "SERVICE_9102_IGNORE=true"

-  holmes-rule:
-    image: {{ nexus_docker_repo }}/onap/holmes/rule-management:latest
-    container_name: hr
-    hostname: hr
+  mvp-dcae-analytics-holmes-rule-management:
+    image: "{{ nexus_docker_repo}}/onap/holmes/rule-management:{{ holmes_docker_rm }}"
+    container_name: "mvp-dcae-analytics-holmes-rule-management"
+    hostname: "mvp-dcae-analytics-holmes-rule-management"
     environment:
-      - URL_JDBC=db:5432
-      - JDBC_USERNAME=holmes
-      - JDBC_PASSWORD=holmespwd
-      - MSB_ADDR={{ msb_ip_addr }}
-      - CONSUL_HOST=consul
-      - CONSUL_PORT=8500
-      - HOSTNAME=hr
+      - "URL_JDBC=pgHolmes:5432"
+      - "JDBC_USERNAME=holmes"
+      - "JDBC_PASSWORD=holmespwd"
+      - "MSB_ADDR={{ msb_ip_addr }}"
+      - "CONSUL_HOST=consul"
+      - "CONSUL_PORT=8500"
+      - "HOSTNAME=mvp-dcae-analytics-holmes-rule-management"
     ports:
       - "9101:9101"
+    labels:
+      - "SERVICE_9101_IGNORE=true"
+
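The services in docker-compose-2.yaml are renamed so that Registrator publishes them into Consul under the mvp-* names that register.sh and the Consul KV entries below expect. A quick way to confirm the renaming took effect is to query the standard Consul catalog API for those names (sketch):

```bash
# Sketch: look up the renamed service components in the Consul catalog.
DCAE_IP={{ dcae_ip_addr }}
curl http://${DCAE_IP}:8500/v1/catalog/service/mvp-dcaegen2-collectors-ves
curl http://${DCAE_IP}:8500/v1/catalog/service/mvp-dcaegen2-analytics-tca
```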
diff --git a/heat/R2MVP/docker-compose-3.yaml b/heat/R2MVP/docker-compose-3.yaml
new file mode 100644
index 0000000..b65e37c
--- /dev/null
+++ b/heat/R2MVP/docker-compose-3.yaml
@@ -0,0 +1,82 @@
+#############################################################################
+#
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#             http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#############################################################################
+version: '2.1'
+services:
+
+  inventory:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.inventory-api:{{ dcae_docker_inv }}"
+    restart: always
+    container_name: "inventory"
+    hostname: "inventory"
+    environment:
+      - "POSTGRES_USER=inventory"
+      - "POSTGRES_PASSWORD=inventorypwd"
+    ports:
+      - "8080:8080"
+    labels:
+      - "SERVICE_8080_NAME=inventory"
+      - "SERVICE_8080_CHECK_HTTP=/dcae-service-types"
+      - "SERVICE_8080_CHECK_INTERVAL=15s"
+      - "SERVICE_8080_CHECK_INITIAL_STATUS=passing"
+
+
+  service-change-handler:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.servicechange-handler:{{ dcae_docker_sch }}"
+    container_name: "service-change-handler"
+    hostname: "service-change-handler"
+    ports:
+      - "8079:8079"
+    environment:
+      - "POSTGRES_USER=inventory"
+      - "POSTGRES_PASSWORD=inventorypwd"
+    labels:
+      - "SERVICE_NAME=service_change_handler"
+      - "SERVICE_CHECK_SCRIPT=/opt/health.sh"
+      - "SERVICE_CHECK_INTERVAL=15s"
+      - "SERVICE_CHECK_INITIAL_STATUS=passing"
+
+
+  deployment_handler:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.deployment-handler:{{ dcae_docker_dh }}"
+    restart: always
+    container_name: "deployment-handler"
+    hostname: "deployment-handler"
+    environment:
+      - "CLOUDIFY_PASSWORD=admin"
+      - "CLOUDIFY_USER=admin"
+    ports:
+      - "8188:8443"
+    labels:
+      - "SERVICE_8443_NAME=deployment_handler"
+      - "SERVICE_8443_CHECK_HTTP=/"
+      - "SERVICE_8443_CHECK_INTERVAL=15s"
+      - "SERVICE_8443_CHECK_INITIAL_STATUS=passing"
+
+
+  policy_handler:
+    image: "{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.platform.policy-handler:{{ dcae_docker_ph }}"
+    restart: always
+    container_name: "policy-handler"
+    hostname: "policy-handler"
+    ports:
+      - "25577:25577"
+    labels:
+      - "SERVICE_25577_NAME=policy_handler"
+      - "SERVICE_25577_CHECK_HTTP=/healthcheck"
+      - "SERVICE_25577_CHECK_INTERVAL=15s"
+      - "SERVICE_25577_CHECK_INITIAL_STATUS=passing"
+
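The platform components in docker-compose-3.yaml expose the same paths used in their SERVICE_*_CHECK_HTTP labels, so they can be probed directly once the file is up. A sketch; the assumption that the deployment handler serves HTTPS behind the 8188->8443 mapping is mine, hence the -k:

```bash
# Sketch: probe the platform components via the paths from their check labels.
DCAE_IP={{ dcae_ip_addr }}
curl http://${DCAE_IP}:8080/dcae-service-types    # inventory
curl http://${DCAE_IP}:25577/healthcheck          # policy handler
curl -k https://${DCAE_IP}:8188/                  # deployment handler (HTTPS assumed on the mapped port)
```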
diff --git a/heat/R2MVP/docker-compose-4.yaml b/heat/R2MVP/docker-compose-4.yaml
new file mode 100644
index 0000000..4b5d63f
--- /dev/null
+++ b/heat/R2MVP/docker-compose-4.yaml
@@ -0,0 +1,65 @@
+#############################################################################
+#
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#             http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#############################################################################
+version: '2.1'
+services:
+  heartbeat:
+    image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.platform.heartbeat:{{ dcae_docker_heartbeat }}"
+
+    #image: nexus3.onap.org:10001/onap/org.onap.dcaegen2.services.heartbeat:latest
+    container_name: heartbeat
+    hostname: heartbeat
+    environment:
+      - CONSUL_HOST=consul
+      - CONSUL_PORT=8500
+      - HOSTNAME=
+    ports:
+      - "1003:1003"
+    labels:
+      - SERVICE_NAME=static-dcaegen2-services-heartbeat
+      - SERVICE_CHECK_INITIAL_STATUS=passing
+
+
+  prh:
+    image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.services.prh.prh-app-server:{{ dcae_docker_prh }}"
+    container_name: prh
+    hostname: prh
+    environment:
+      - CONSUL_HOST=consul
+      - CONSUL_PORT=8500
+      - HOSTNAME=
+    ports:
+      - "1002:1002"
+    labels:
+      - SERVICE_NAME=static-dcaegen2-services-prh-prh-app-server
+      - SERVICE_CHECK_INITIAL_STATUS=passing
+
+
+  snmptrap:
+    image: nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.snmptrap:latest
+    image: "{{ nexus_docker_repo}}/onap/org.onap.dcaegen2.collectors.snmptrap:{{ dcae_docker_snmptrap }}"
+    container_name: snmptrap
+    hostname: snmptrap
+    environment:
+      - CONSUL_HOST=consul
+      - CONSUL_PORT=8500
+      - HOSTNAME=
+    ports:
+      - "162:162/udp"
+    labels:
+      - SERVICE_NAME=static-dcaegen2-collectors-snamptrap
+      - SERVICE_CHECK_INITIAL_STATUS=passing
+
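The heartbeat, PRH, and SNMP trap services register under static-* names with only an initial passing status and no HTTP check, so the simplest sanity check is at the container level (sketch, assuming the file is started with docker-compose):

```bash
# Sketch: container-level check for the compose-4 services.
docker-compose -f docker-compose-4.yaml up -d
docker ps --filter name=heartbeat --filter name=prh --filter name=snmptrap \
          --format 'table {{.Names}}\t{{.Status}}\t{{.Ports}}'
```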
diff --git a/heat/R2MVP/register.sh b/heat/R2MVP/register.sh
new file mode 100755
index 0000000..d6d407a
--- /dev/null
+++ b/heat/R2MVP/register.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+
+#############################################################################
+#
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#             http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#############################################################################
+
+
+
+# We now register services that are not handled by Registrator
+# minimum platform components
+HOSTNAME_CONSUL="consul"
+HOSTNAME_CM="cloudify_manager"
+HOSTNAME_CBS="config_binding_service"
+
+# R2 MVP service components
+HOSTNAME_MVP_VES="mvp-dcaegen2-collectors-ves"
+HOSTNAME_MVP_TCA="mvp-cdap_app_cdap_app_tca"
+HOSTNAME_MVP_HR="mvp-dcae-analytics-holmes-rule-management"
+HOSTNAME_MVP_HE="mvp-dcae-analytics-holmes-engine-management"
+
+
+# registering docker host
+SVC_HOSTNAME="dockerhost"
+DOCKERHOST_IP="$(cat /opt/config/dcae_ip_addr.txt)"
+REGREQ="
+{
+  \"Name\" : \"${SVC_HOSTNAME}\",
+  \"ID\" : \"dockerhost\",
+  \"Address\": \"${SVC_HOSTNAME}\",
+  \"Port\": 2376,
+  \"Check\" : {
+    \"Name\" : \"${SVC_HOSTNAME}_health\",
+    \"Interval\" : \"15s\",
+    \"HTTP\" : \"http://${DOCKERHOST_IP}:2376/containers/registrator/json\",
+    \"Status\" : \"passing\",
+    \"DeregisterCriticalServiceAfter\" : \"30m\"
+  }
+}
+"
+curl -X PUT -H 'Content-Type: application/json' --data-binary "$REGREQ" "http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register"
+
+
+
+# registering Holmes services
+SVC_HOSTNAME="${HOSTNAME_MVP_HR}"
+DOCKERHOST_IP="$(cat /opt/config/dcae_ip_addr.txt)"
+REGREQ="
+{
+  \"Name\" : \"${SVC_HOSTNAME}\",
+  \"ID\" : \"${SVC_HOSTNAME}\",
+  \"Address\": \"${SVC_HOSTNAME}\",
+  \"Port\": 9101,
+  \"Check\" : {
+    \"Name\" : \"${SVC_HOSTNAME}_health\",
+    \"Interval\" : \"15s\",
+    \"HTTP\" : \"https://${DOCKERHOST_IP}:9101/api/holmes-rule-mgmt/v1/healthcheck\",
+    \"tls_skip_verify\": true,
+    \"Status\" : \"passing\",
+    \"DeregisterCriticalServiceAfter\" : \"30m\"
+  }
+}
+"
+curl -v -X PUT -H 'Content-Type: application/json' --data-binary "$REGREQ" "http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register"
+
+
+SVC_HOSTNAME="${HOSTNAME_MVP_HE}"
+DOCKERHOST_IP="$(cat /opt/config/dcae_ip_addr.txt)"
+REGREQ="
+{
+  \"Name\" : \"${SVC_HOSTNAME}\",
+  \"ID\" : \"${SVC_HOSTNAME}\",
+  \"Address\": \"${SVC_HOSTNAME}\",
+  \"Port\": 9102,
+  \"Check\" : {
+    \"Name\" : \"${SVC_HOSTNAME}_health\",
+    \"Interval\" : \"15s\",
+    \"HTTP\" : \"https://${DOCKERHOST_IP}:9102/api/holmes-engine-mgmt/v1/healthcheck\",
+    \"tls_skip_verify\": true,
+    \"Status\" : \"passing\",
+    \"DeregisterCriticalServiceAfter\" : \"30m\"
+  }
+}
+"
+curl -v -X PUT -H 'Content-Type: application/json' --data-binary "$REGREQ" "http://${HOSTNAME_CONSUL}:8500/v1/agent/service/register"
+!/bin/bash
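register.sh registers the Docker host and the two Holmes containers with the local Consul agent because Registrator does not handle them. The same agent API can be used to verify or undo a registration; a sketch, assuming it is run from a container on the same Docker network so that the `consul` hostname resolves:

```bash
# Sketch: list the manually registered services and, if needed, remove one.
curl http://consul:8500/v1/agent/services
curl -X PUT http://consul:8500/v1/agent/service/deregister/mvp-dcae-analytics-holmes-rule-management
```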
+
+
+
+
+# now push KVs
+# generated with https://www.browserling.com/tools/json-escape
+# config binding service
+REGKV="{}"
+curl -v -X PUT -H "Content-Type: application/json" --data "${REGKV}" http://${HOSTNAME_CONSUL}:8500/v1/kv/config_binding_service
+# checked
+
+# inventory
+REGKV="{\"database\":{\"checkConnectionWhileIdle\":false,\"driverClass\":\"org.postgresql.Driver\",\"evictionInterval\":\"10s\",\"initialSize\":2,\"maxSize\":8,\"maxWaitForConnection\":\"1s\",\"minIdleTime\":\"1 minute\",\"minSize\":2,\"password\":\"inventorypwd\",\"properties\":{\"charSet\":\"UTF-8\"},\"url\":\"jdbc:postgresql:\/\/pgInventory:5432\/postgres\",\"user\":\"inventory\",\"validationQuery\":\"\/* MyService Health Check *\/ SELECT 1\"},\"databusControllerConnection\":{\"host\":\"databus-controller-hostname\",\"mechId\":null,\"password\":null,\"port\":8443,\"required\":false},\"httpClient\":{\"connectionTimeout\":\"5000milliseconds\",\"gzipEnabled\":false,\"gzipEnabledForRequests\":false,\"maxThreads\":128,\"minThreads\":1,\"timeout\":\"5000milliseconds\"}}"
+curl -v -X PUT -H
"Content-Type: application/json" --data "${REGKV}" http://${HOSTNAME_CONSUL}:8500/v1/kv/inventory +# checked + +# policy handler +REGKV="{\"policy_handler\": {\"deploy_handler\": \"deployment_handler\", \"thread_pool_size\": 4, \"policy_retry_count\": 5, \"scope_prefixes\": [\"DCAE.Config_\"], \"pool_connections\": 20, \"policy_retry_sleep\": 5, \"policy_engine\": {\"path_api\": \"/pdp/api/\", \"headers\": {\"Environment\": \"TEST\", \"ClientAuth\": \"cHl0aG9uOnRlc3Q=\", \"Content-Type\": \"application\/json\", \"Accept\": \"application/json\", \"Authorization\": \"Basic dGVzdHBkcDphbHBoYTEyMw==\"}, \"path_pdp\": \"/pdp/\", \"url\": \"http://10.0.6.1:8081\", \"target_entity\": \"policy_engine\"}}}" +curl -v -X PUT -H "Content-Type: application/json" --data "${REGKV}" http://${HOSTNAME_CONSUL}:8500/v1/kv/policy_handler + + +# service change handler +REGKV="{\"asdcDistributionClient\": {\"asdcAddress\": \"10.0.3.1:8443\",\"asdcUri\": \"https:\/\/10.0.3.1:8443\", \"msgBusAddress\": \"10.0.11.1\", \"user\": \"dcae\", \"password\": \"Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U\", \"pollingInterval\": 20, \"pollingTimeout\": 20, \"consumerGroup\": \"dcae\", \"consumerId\": \"dcae-sch\", \"environmentName\": \"AUTO\", \"keyStorePath\": null, \"keyStorePassword\": null, \"activateServerTLSAuth\": false, \"useHttpsWithDmaap\": false, \"isFilterInEmptyResources\": false}, \"dcaeInventoryClient\": { \"uri\": \"http:\/\/inventory:8080\" }}" +curl -v -X PUT -H "Content-Type: application/json" --data "${REGKV}" http://${HOSTNAME_CONSUL}:8500/v1/kv/service-change-handler + + +# deployment handler +REGKV="{\"cloudify\": {\"protocol\": \"http\"}, \"inventory\": {\"protocol\": \"http\"}}" +curl -v -X PUT -H "Content-Type: application/json" --data "${REGKV}" http://${HOSTNAME_CONSUL}:8500/v1/kv/deployment_handler + + +# ves +REGKV="{\"event.transform.flag\": \"0\", \"tomcat.maxthreads\": \"200\", \"collector.schema.checkflag\": \"1\", \"collector.dmaap.streamid\": \"fault=ves_fault|syslog=ves_syslog|heartbeat=ves_heartbeat|measurementsForVfScaling=ves_measurement|mobileFlow=ves_mobileflow|other=ves_other|stateChange=ves_statechange|thresholdCrossingAlert=ves_thresholdCrossingAlert|voiceQuality=ves_voicequality|sipSignaling=ves_sipsignaling\", \"collector.service.port\": \"8080\", \"collector.schema.file\": \"{\\\"v1\\\":\\\"./etc/CommonEventFormat_27.2.json\\\",\\\"v2\\\":\\\"./etc/CommonEventFormat_27.2.json\\\",\\\"v3\\\":\\\"./etc/CommonEventFormat_27.2.json\\\",\\\"v4\\\":\\\"./etc/CommonEventFormat_27.2.json\\\",\\\"v5\\\":\\\"./etc/CommonEventFormat_28.4.json\\\"}\", \"collector.keystore.passwordfile\": \"/opt/app/VESCollector/etc/passwordfile\", \"collector.inputQueue.maxPending\": \"8096\", \"streams_publishes\": {\"ves_measurement\": {\"type\": \"message_router\", \"dmaap_info\": {\"topic_url\": \"http://10.0.11.1:3904/events/unauthenticated.SEC_MEASUREMENT_OUTPUT/\"}}, \"ves_fault\": {\"type\": \"message_router\", \"dmaap_info\": {\"topic_url\": \"http://10.0.11.1:3904/events/unauthenticated.SEC_FAULT_OUTPUT/\"}}}, \"collector.service.secure.port\": \"8443\", \"header.authflag\": \"0\", \"collector.keystore.file.location\": \"/opt/app/VESCollector/etc/keystore\", \"collector.keystore.alias\": \"dynamically generated\", \"services_calls\": [], \"header.authlist\": \"userid1,base64encodepwd1|userid2,base64encodepwd2\"}" +curl -v -X PUT -H "Content-Type: application/json" --data "${REGKV}" http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcaegen2-collectors-ves + + +# holmes rule management 
+REGKV='{\"streams_subscribes\": {}, \"msb.hostname\": \"10.0.14.1\", \"msb.uri\": \"/api/microservices/v1/services\", \"streams_publishes\": {}, \"holmes.default.rule.volte.scenario1\": \"ControlLoop-VOLTE-2179b738-fd36-4843-a71a-a8c24c70c55b\$\$\$package org.onap.holmes.droolsRule;\\n\\nimport org.onap.holmes.common.dmaap.DmaapService;\\nimport org.onap.holmes.common.api.stat.VesAlarm;\\nimport org.onap.holmes.common.aai.CorrelationUtil;\\nimport org.onap.holmes.common.dmaap.entity.PolicyMsg;\\nimport org.onap.holmes.common.dropwizard.ioc.utils.ServiceLocatorHolder;\\nimport org.onap.holmes.common.utils.DroolsLog;\\n \\n\\nrule \\\"Relation_analysis_Rule\\\"\\nsalience 200\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 0,\\n \$sourceId: sourceId, sourceId != null && !sourceId.equals(\\\"\\\"),\\n\\t\\t\\t\$sourceName: sourceName, sourceName \!= null \&\& \!sourceName.equals(\\\"\\\"),\\n\\t\\t\\t\$startEpochMicrosec: startEpochMicrosec,\\n eventName in (\\\"Fault_MultiCloud_VMFailure\\\"),\\n \$eventId: eventId)\\n \$child : VesAlarm( eventId \!= $eventId, parentId == null,\\n CorrelationUtil.getInstance().isTopologicallyRelated(sourceId, \$sourceId, \$sourceName),\\n eventName in (\\\"Fault_MME_eNodeB out of service alarm\\\"),\\n startEpochMicrosec \< \$startEpochMicrosec + 60000 \&\& startEpochMicrosec \> \$startEpochMicrosec - 60000 )\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"Relation_analysis_Rule: rootId=\\\" + \$root.getEventId() + \\\", childId=\\\" + \$child.getEventId());\\n\\t\\t\$child.setParentId(\$root.getEventId());\\n\\t\\tupdate(\$child);\\n\\t\\t\\nend\\n\\nrule \\\"root_has_child_handle_Rule\\\"\\nsalience 150\\nno-loop true\\n\\twhen\\n\\t\\t\$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0, \$eventId: eventId)\\n\\t\\t\$child : VesAlarm(eventId \!= $eventId, parentId == $eventId)\\n\\tthen\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_has_child_handle_Rule: rootId=\\\" + \$root.getEventId() + \\\", childId=\\\" + $child.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, \$child, \\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\t\$root.setRootFlag(1);\\n\\t\\tupdate(\$root);\\nend\\n\\nrule \\\"root_no_child_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0,\\n sourceId \!= null \&\& \!sourceId.equals(\\\"\\\"),\\n\\t\\t\\tsourceName \!= null \&\& \!sourceName.equals(\\\"\\\"),\\n eventName in (\\\"Fault_MultiCloud_VMFailure\\\"))\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_no_child_handle_Rule: rootId=\\\" + \$root.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, null, \\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\t$root.setRootFlag(1);\\n\\t\\tupdate(\$root);\\nend\\n\\nrule \\\"root_cleared_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$root : VesAlarm(alarmIsCleared == 1, rootFlag 
== 1)\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"root_cleared_handle_Rule: rootId=\\\" + \$root.getEventId());\\n\\t\\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\\n\\t\\tPolicyMsg policyMsg = dmaapService.getPolicyMsg(\$root, null, \\\"org.onap.holmes.droolsRule\\\");\\n dmaapService.publishPolicyMsg(policyMsg, \\\"unauthenticated.DCAE_CL_OUTPUT\\\");\\n\\t\\tretract(\$root);\\nend\\n\\nrule \\\"child_handle_Rule\\\"\\nsalience 100\\nno-loop true\\n when\\n \$child : VesAlarm(alarmIsCleared == 1, rootFlag == 0)\\n then\\n\\t\\tDroolsLog.printInfo(\\\"===========================================================\\\");\\n\\t\\tDroolsLog.printInfo(\\\"child_handle_Rule: childId=\\\" + \$child.getEventId());\\n\\t\\tretract(\$child);\\nend\", \"services_calls\": {}}' + + + +REGKV='{"streams_subscribes": {}, "msb.hostname": "10.0.14.1", "msb.uri": "/api/microservices/v1/services", "streams_publishes": {}, "holmes.default.rule.volte.scenario1": "ControlLoop-VOLTE-2179b738-fd36-4843-a71a-a8c24c70c55b$$$package org.onap.holmes.droolsRule;\n\nimport org.onap.holmes.common.dmaap.DmaapService;\nimport org.onap.holmes.common.api.stat.VesAlarm;\nimport org.onap.holmes.common.aai.CorrelationUtil;\nimport org.onap.holmes.common.dmaap.entity.PolicyMsg;\nimport org.onap.holmes.common.dropwizard.ioc.utils.ServiceLocatorHolder;\nimport org.onap.holmes.common.utils.DroolsLog;\n \n\nrule \"Relation_analysis_Rule\"\nsalience 200\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 0,\n $sourceId: sourceId, sourceId != null && !sourceId.equals(\"\"),\n\t\t\t$sourceName: sourceName, sourceName != null && !sourceName.equals(\"\"),\n\t\t\t$startEpochMicrosec: startEpochMicrosec,\n eventName in (\"Fault_MultiCloud_VMFailure\"),\n $eventId: eventId)\n $child : VesAlarm( eventId != $eventId, parentId == null,\n CorrelationUtil.getInstance().isTopologicallyRelated(sourceId, $sourceId, $sourceName),\n eventName in (\"Fault_MME_eNodeB out of service alarm\"),\n startEpochMicrosec < $startEpochMicrosec + 60000 && startEpochMicrosec > $startEpochMicrosec - 60000 )\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"Relation_analysis_Rule: rootId=\" + $root.getEventId() + \", childId=\" + $child.getEventId());\n\t\t$child.setParentId($root.getEventId());\n\t\tupdate($child);\n\t\t\nend\n\nrule \"root_has_child_handle_Rule\"\nsalience 150\nno-loop true\n\twhen\n\t\t$root : VesAlarm(alarmIsCleared == 0, rootFlag == 0, $eventId: eventId)\n\t\t$child : VesAlarm(eventId != $eventId, parentId == $eventId)\n\tthen\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_has_child_handle_Rule: rootId=\" + $root.getEventId() + \", childId=\" + $child.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = dmaapService.getPolicyMsg($root, $child, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\t$root.setRootFlag(1);\n\t\tupdate($root);\nend\n\nrule \"root_no_child_handle_Rule\"\nsalience 100\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 0, rootFlag == 0,\n sourceId != null && !sourceId.equals(\"\"),\n\t\t\tsourceName != null && !sourceName.equals(\"\"),\n eventName in 
(\"Fault_MultiCloud_VMFailure\"))\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_no_child_handle_Rule: rootId=\" + $root.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = dmaapService.getPolicyMsg($root, null, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\t$root.setRootFlag(1);\n\t\tupdate($root);\nend\n\nrule \"root_cleared_handle_Rule\"\nsalience 100\nno-loop true\n when\n $root : VesAlarm(alarmIsCleared == 1, rootFlag == 1)\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"root_cleared_handle_Rule: rootId=\" + $root.getEventId());\n\t\tDmaapService dmaapService = ServiceLocatorHolder.getLocator().getService(DmaapService.class);\n\t\tPolicyMsg policyMsg = dmaapService.getPolicyMsg($root, null, \"org.onap.holmes.droolsRule\");\n dmaapService.publishPolicyMsg(policyMsg, \"unauthenticated.DCAE_CL_OUTPUT\");\n\t\tretract($root);\nend\n\nrule \"child_handle_Rule\"\nsalience 100\nno-loop true\n when\n $child : VesAlarm(alarmIsCleared == 1, rootFlag == 0)\n then\n\t\tDroolsLog.printInfo(\"===========================================================\");\n\t\tDroolsLog.printInfo(\"child_handle_Rule: childId=\" + $child.getEventId());\n\t\tretract($child);\nend", "services_calls": {}}' +curl -v -X PUT -H "Content-Type: application/json" --data "${REGKV}" http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcae-analytics-holmes-rule-management + + + +# Holmes engine management +REGKV="{\"msb.hostname\": \"10.0.14.1\", \"services_calls\": {}, \"msb.uri\": \"\/api\/microservices\/v1\/services\", \"streams_publishes\": {\"dcae_cl_out\": {\"type\": \"message_router\", \"dmaap_info\": {\"topic_url\": \"http:\/\/10.0.11.1:3904\/events\/unauthenticated.DCAE_CL_OUTPUT\"}}}, \"streams_subscribes\": {\"ves_fault\": {\"type\": \"message_router\", \"dmaap_info\": {\"topic_url\": \"http:\/\/10.0.11.1:3904\/events\/unauthenticated.SEC_FAULT_OUTPUT\"}}}}" +REGKV="{\"msb.hostname\": \"10.0.14.1\", \"services_calls\": {}, \"msb.uri\": \"/api/microservices/v1/services\", \"streams_publishes\": {\"dcae_cl_out\": {\"type\": \"message_router\", \"dmaap_info\": {\"topic_url\": \"http://10.0.11.1:3904/events/unauthenticated.DCAE_CL_OUTPUT\"}}}, \"streams_subscribes\": {\"ves_fault\": {\"type\": \"message_router\", \"dmaap_info\": {\"topic_url\": \"http://10.0.11.1:3904/events/unauthenticated.SEC_FAULT_OUTPUT\"}}}}" +curl -v -X PUT -H "Content-Type: application/json" --data "${REGKV}" http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-dcae-analytics-holmes-engine-management + + +#curl http://localhost:8500/v1/kv/config_binding_service |jq .[0].Value |sed -e 's/\"//g' |base64 --decode + + + +# TCA +REGKV='{"thresholdCalculatorFlowletInstances":"2","tcaVESMessageStatusTableTTLSeconds":"86400","tcaVESMessageStatusTableName":"TCAVESMessageStatusTable","tcaVESAlertsTableTTLSeconds":"1728000","tcaVESAlertsTableName":"TCAVESAlertsTable","tcaSubscriberOutputStreamName":"TCASubscriberOutputStream","tcaAlertsAbatementTableTTLSeconds":"1728000","tcaAlertsAbatementTableName":"TCAAlertsAbatementTable","streams_subscribes":{},"streams_publishes":{},"services_calls":{},"appName":"dcae-tca","appDescription":"DCAE Analytics Threshold Crossing Alert Application"}' +curl -v -X PUT -H "Content-Type: application/json" --data "${REGKV}" 
http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-cdap_app_cdap_app_tca + + +# TCA pref +REGKV='{"tca_policy":"{\"domain\":\"measurementsForVfScaling\",\"metricsPerEventName\":[{\"eventName\":\"vFirewallBroadcastPackets\",\"controlLoopSchemaType\":\"VNF\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":300,\"direction\":\"LESS_OR_EQUAL\",\"severity\":\"MAJOR\",\"closedLoopEventStatus\":\"ONSET\"},{\"closedLoopControlName\":\"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":700,\"direction\":\"GREATER_OR_EQUAL\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]},{\"eventName\":\"vLoadBalancer\",\"controlLoopSchemaType\":\"VM\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\":300,\"direction\":\"GREATER_OR_EQUAL\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]},{\"eventName\":\"Measurement_vGMUX\",\"controlLoopSchemaType\":\"VNF\",\"policyScope\":\"DCAE\",\"policyName\":\"DCAE.Config_tca-hi-lo\",\"policyVersion\":\"v0.0.1\",\"thresholds\":[{\"closedLoopControlName\":\"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\",\"thresholdValue\":0,\"direction\":\"EQUAL\",\"severity\":\"MAJOR\",\"closedLoopEventStatus\":\"ABATED\"},{\"closedLoopControlName\":\"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\",\"version\":\"1.0.2\",\"fieldPath\":\"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\",\"thresholdValue\":0,\"direction\":\"GREATER\",\"severity\":\"CRITICAL\",\"closedLoopEventStatus\":\"ONSET\"}]}]}","subscriberTopicName":"unauthenticated.SEC_MEASUREMENT_OUTPUT","subscriberTimeoutMS":"-1","subscriberProtocol":"http","subscriberPollingInterval":"30000","subscriberMessageLimit":"-1","subscriberHostPort":"3904","subscriberHostName":"10.0.11.1","subscriberContentType":"application/json","subscriberConsumerId":"c12","subscriberConsumerGroup":"OpenDCAE-c12","publisherTopicName":"unauthenticated.DCAE_CL_OUTPUT","publisherProtocol":"http","publisherPollingInterval":"20000","publisherMaxRecoveryQueueSize":"100000","publisherMaxBatchSize":"1","publisherHostPort":"3904","publisherHostName":"10.0.11.1","publisherContentType":"application/json","enableAlertCEFFormat":"false","enableAAIEnrichment":true,"aaiVNFEnrichmentAPIPath":"/aai/v11/network/generic-vnfs/generic-vnf","aaiVMEnrichmentAPIPath":"/aai/v11/search/nodes-query","aaiEnrichmentUserPassword":"DCAE","aaiEnrichmentUserName":"DCAE","aaiEnrichmentProtocol":"https","aaiEnrichmentPortNumber":"8443","aaiEnrichmentIgnoreSSLCertificateErrors":"true","aaiEnrichmentHost":"10.0.1.1"}' +curl -v -X PUT -H "Content-Type: application/json" --data "${REGKV}" http://${HOSTNAME_CONSUL}:8500/v1/kv/mvp-cdap_app_cdap_app_tca:preferenes diff --git 
a/heat/R2MVP/setup.sh b/heat/R2MVP/setup.sh
new file mode 100755
index 0000000..8bad24c
--- /dev/null
+++ b/heat/R2MVP/setup.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+
+#############################################################################
+#
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#             http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#############################################################################
+
+
+NETWORK="config_default"
+
+echo "Cleaning up any previously deployed cludify manager and registrator"
+docker stop registrator cloudify-manager
+docker rm registrator cloudify-manager
+
+echo "Launching registrator on dockerhost"
+docker run -d \
+--network=${NETWORK} \
+--name=registrator \
+-e EXTERNAL_IP={{ dcae_ip_addr }} \
+-e CONSUL_HOST=consul \
+-v /var/run/docker.sock:/tmp/docker.sock \
+onapdcae/registrator:v7
+
+echo "Launching Cloudify Manager"
+docker run -d \
+--network="${NETWORK}" \
+--name cloudify-manager \
+--restart unless-stopped \
+-v /sys/fs/cgroup:/sys/fs/cgroup:ro \
+-p 80:80 \
+--tmpfs /run \
+--tmpfs /run/lock \
+--security-opt seccomp:unconfined \
+--cap-add SYS_ADMIN \
+--label "SERVICE_80_NAME=cloudify_manager" \
+--label "SERVICE_80_CHECK_TCP=true" \
+--label "SERVICE_80_CHECK_INTERVAL=15s" \
+--label "SERVICE_80_CHECK_INITIAL_STATUS=passing" \
+{{ nexus_docker_repo }}/onap/org.onap.dcaegen2.deployments.cm-container:{{ dcae_docker_cm }}
+
+echo "Cloudify Manager deployed, waiting for completion"
+while ! nc -z localhost 80; do sleep 1; done
+
+echo "configure Cloudify Manager"
+#8080, 5432 ports occupied
+pip install cloudify==4.2
+cfy profiles use {{ dcae_ip_addr }} -u admin -p admin -t default_tenant
+cfy status
+
+./build-plugins.sh \
+https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/R2 \
+https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/plugins/releases
+
+for wagon in ./wagons/*.wgn; do
+    cfy plugins upload ${wagon}
+done
+
+echo "Setup complete"
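setup.sh restarts Registrator and Cloudify Manager, points the Cloudify 4.2 CLI at the manager, and uploads the wagons produced by build-plugins.sh. A short post-run verification sketch, using the same tools the script already installs:

```bash
# Sketch: confirm the manager is reachable and the plugins were uploaded.
cfy status
cfy plugins list
docker logs --tail 20 registrator   # Registrator should be watching the Docker socket
```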