aboutsummaryrefslogtreecommitdiffstats
path: root/scripts/bootstrap.sh
blob: 8089439599516d9b28ea8710e74f2692eb55d659 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
#!/bin/bash
# ================================================================================
# Copyright (c) 2018-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2021 J. F. Lucas. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================

# Install DCAE via Cloudify Manager
# Expects:
#   CM address (IP or DNS) in CMADDR environment variable
#   CM password in CMPASS environment variable (assumes user is "admin")
#   ONAP common Kubernetes namespace in ONAP_NAMESPACE environment variable
#   If DCAE components are deployed in a separate Kubernetes namespace, that namespace in DCAE_NAMESPACE variable.
#   Blueprints for components to be installed in /blueprints
#   Input files for components to be installed in /inputs
# Optionally, allows:
#   CM protocol in CMPROTO environment variable (defaults to HTTP)
#   CM port in CMPORT environment variable (defaults to 80)
# If CMPROTO is set to "https", bootstrap will use HTTPS to communicate with CM.  Otherwise,
# it will use HTTP.
# If CMPROTO is set to "https", the script assumes the CA cert needed to verify the cert
# presented by CM is mounted at /certs/cacert.pem.

# Set defaults for CM protocol and port
CMPROTO=${CMPROTO:-http}
CMPORT=${CMPORT:-80}

# Set up additional parameters for using HTTPS
CACERT="/certs/cacert.pem"
CFYTLS=""
CURLTLS=""
if [ $CMPROTO = "https" ]
then
    CFYTLS="--rest-certificate $CACERT --ssl"
    CURLTLS="--cacert $CACERT"
fi

### FUNCTION DEFINITIONS ###

# keep_running: Keep running after bootstrap finishes or after error
keep_running() {
    echo $1
    sleep infinity &
    wait
}

# cm_hasany: Query Cloudify Manager and return 0 (true) if there are any entities matching the query
# Used to see if something is already present on CM
# $1 -- query fragment, for instance "plugins?archive_name=xyz.wgn" to get
#  the number of plugins that came from the archive file "xyz.wgn"
function cm_hasany {
    # We use _include=id to limit the amount of data the CM sends back
    # We rely on the "metadata.pagination.total" field in the response
    # for the total number of matching entities
    COUNT=$(curl -Ss -H "Tenant: default_tenant" --user admin:${CMPASS} ${CURLTLS} "${CMPROTO}://${CMADDR}:${CMPORT}/api/v3.1/$1&_include=id" \
             | /bin/jq .metadata.pagination.total)
    if (( $COUNT > 0 ))
    then
        return 0
    else
        return 1
    fi
}

# deploy: Deploy components if they're not already deployed
# $1 -- name (for bp and deployment)
# $2 -- blueprint file name
# $3 -- inputs file name (optional)
function deploy {
    # Don't crash the script on error
    set +e

    # Upload blueprint if it's not already there
    if cm_hasany "blueprints?id=$1"
    then
        echo blueprint $1 is already installed on ${CMADDR}
    else
        cfy blueprints upload -b $1  /blueprints/$2
    fi

    # Create deployment if it doesn't already exist
    if cm_hasany "deployments?id=$1"
    then
       echo deployment $1 has already been created on ${CMADDR}
    else
        INPUTS=
        if [ -n "$3" ]
        then
            INPUTS="-i/inputs/$3"
        fi
        cfy deployments create -b $1 ${INPUTS} $1
    fi

    # Run the install workflow if it hasn't been run already
    # We don't have a completely certain way of determining this.
    # We check to see if the deployment has any node instances
    # that are in the 'uninitialized' or 'deleted' states.  (Note that
    # the & in the query acts as a logical OR for the multiple state values.)
    # We'll try to install when a deployment has node instances in those states
    if cm_hasany "node-instances?deployment_id=$1&state=uninitialized&state=deleted"
    then
        cfy executions start -d $1 install
    else
        echo deployment $1 appears to have had an install workflow executed already or is not ready for an install
    fi
}


### END FUNCTION DEFINTIONS ###

set -x

# Make sure we keep the container alive after an error
trap keep_running ERR

set -e

# Set up profile to access Cloudify Manager
cfy profiles use -u admin -t default_tenant -p "${CMPASS}" ${CFYTLS} "${CMADDR}"

# Output status, for debugging purposes
cfy status

# Store the CM password into a Cloudify secret
cfy secret create -s ${CMPASS} cmpass

# After this point, failures should not stop the script or block later commands
trap - ERR
set +e

# Initialize the DCAE postgres instance
deploy pgaas_initdb k8s-pgaas-initdb.yaml k8s-pgaas-initdb-inputs.yaml

# Deploy service components
# tcagen2, ves, prh, hv-ves, datafile-collector can be deployed simultaneously
deploy tcagen2 k8s-tcagen2.yaml k8s-tcagen2-inputs.yaml &
deploy ves-tls k8s-ves.yaml k8s-ves-inputs-tls.yaml &
deploy prh k8s-prh.yaml k8s-prh-inputs.yaml &
deploy hv-ves k8s-hv-ves.yaml k8s-hv_ves-inputs.yaml &
# holmes_rules must be deployed before holmes_engine, but holmes_rules can go in parallel with other service components
deploy holmes_rules k8s-holmes-rules.yaml k8s-holmes_rules-inputs.yaml
deploy holmes_engine k8s-holmes-engine.yaml k8s-holmes_engine-inputs.yaml

# Display deployments, for debugging purposes
cfy deployments list

# Load blueprints into DCAE inventory as
# DCAE service types
. /scripts/inventory.sh
for BP in /blueprints/*.yaml
do
  upload_service_type $BP $CACERT
done

# Continue running
keep_running "Finished bootstrap steps."
echo "Exiting!"
ds --all-namespaces --show-all -o wide ) echo "${LIST_ALL}" # for use by continuous deployment only echo " deleting /dockerdata-nfs" sudo chmod -R 777 /dockerdata-nfs/onap sudo chmod -R 777 /dockerdata-nfs/dev rm -rf /dockerdata-nfs/onap rm -rf /dockerdata-nfs/dev fi # for use by continuous deployment only if [[ "$CLONE_NEW_OOM" != false ]]; then rm -rf oom echo "pull new oom" git clone -b $BRANCH http://gerrit.onap.org/r/oom # https://wiki.onap.org/display/DW/OOM+Helm+%28un%29Deploy+plugins sudo cp -R ~/oom/kubernetes/helm/plugins/ ~/.helm fi if [ "$BRANCH" == "amsterdam" ]; then echo "start config pod" # still need to source docker variables source oom/kubernetes/oneclick/setenv.bash #echo "source setenv override" echo "moving onap-parameters.yaml to oom/kubernetes/config" cp onap-parameters.yaml oom/kubernetes/config cd oom/kubernetes/config ./createConfig.sh -n $ENVIRON cd ../../../ echo "verify onap-config is 0/1 not 1/1 - as in completed - an error pod - means you are missing onap-parameters.yaml or values are not set in it." while [ $(kubectl get pods -n onap -a | grep config | grep 0/1 | grep Completed | wc -l) -eq 0 ]; do sleep 15 echo "waiting for config pod to complete" done else echo "moving values.yaml to oom/kubernetes/" #sudo cp values.yaml oom/kubernetes/onap fi # usually the prepull takes up to 25-300 min - however hourly builds will finish the docker pulls before the config pod is finished #echo "pre pull docker images - 35+ min" #wget https://jira.onap.org/secure/attachment/11261/prepull_docker.sh #chmod 777 prepull_docker.sh #./prepull_docker.sh echo "start onap pods" if [ "$BRANCH" == "amsterdam" ]; then cd oom/kubernetes/oneclick ./createAll.bash -n $ENVIRON cd ../../../ else cd oom/kubernetes/ sudo make clean sudo make all sudo make $ENVIRON #sudo helm install local/onap -n onap --namespace $ENVIRON sudo helm deploy onap local/onap --namespace $ENVIRON cd ../../ fi echo "wait for all pods up for 15-80 min" FAILED_PODS_LIMIT=0 MAX_WAIT_PERIODS=480 # 120 MIN COUNTER=0 PENDING_PODS=0 while [ $(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l) -gt $FAILED_PODS_LIMIT ]; do PENDING=$(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l) PENDING_PODS=$PENDING sleep 15 LIST_PENDING=$(kubectl get pods --all-namespaces -o wide | grep -E '0/|1/2' ) echo "${LIST_PENDING}" echo "${PENDING} pending > ${FAILED_PODS_LIMIT} at the ${COUNTER}th 15 sec interval" echo "" COUNTER=$((COUNTER + 1 )) MAX_WAIT_PERIODS=$((MAX_WAIT_PERIODS - 1)) if [ "$MAX_WAIT_PERIODS" -eq 0 ]; then FAILED_PODS_LIMIT=800 fi done echo "report on non-running containers" PENDING=$(kubectl get pods --all-namespaces | grep -E '0/|1/2') PENDING_COUNT=$(kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l) PENDING_COUNT_AAI=$(kubectl get pods -n $ENVIRON | grep aai- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_AAI" -gt 0 ]; then echo "down-aai=${PENDING_COUNT_AAI}" fi # todo don't stop if aai is down PENDING_COUNT_APPC=$(kubectl get pods -n $ENVIRON | grep appc- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_APPC" -gt 0 ]; then echo "down-appc=${PENDING_COUNT_APPC}" fi PENDING_COUNT_MR=$(kubectl get pods -n $ENVIRON | grep message-router- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_MR" -gt 0 ]; then echo "down-mr=${PENDING_COUNT_MR}" fi PENDING_COUNT_SO=$(kubectl get pods -n $ENVIRON | grep so- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_SO" -gt 0 ]; then echo "down-so=${PENDING_COUNT_SO}" fi PENDING_COUNT_POLICY=$(kubectl get pods -n $ENVIRON | grep policy- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_POLICY" -gt 0 ]; then echo "down-policy=${PENDING_COUNT_POLICY}" fi PENDING_COUNT_PORTAL=$(kubectl get pods -n $ENVIRON | grep portal- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_PORTAL" -gt 0 ]; then echo "down-portal=${PENDING_COUNT_PORTAL}" fi PENDING_COUNT_LOG=$(kubectl get pods -n $ENVIRON | grep log- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_LOG" -gt 0 ]; then echo "down-log=${PENDING_COUNT_LOG}" fi PENDING_COUNT_ROBOT=$(kubectl get pods -n $ENVIRON | grep robot- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_ROBOT" -gt 0 ]; then echo "down-robot=${PENDING_COUNT_ROBOT}" fi PENDING_COUNT_SDC=$(kubectl get pods -n $ENVIRON | grep sdc- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_SDC" -gt 0 ]; then echo "down-sdc=${PENDING_COUNT_SDC}" fi PENDING_COUNT_SDNC=$(kubectl get pods -n $ENVIRON | grep sdnc- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_SDNC" -gt 0 ]; then echo "down-sdnc=${PENDING_COUNT_SDNC}" fi PENDING_COUNT_VID=$(kubectl get pods -n $ENVIRON | grep vid- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_VID" -gt 0 ]; then echo "down-vid=${PENDING_COUNT_VID}" fi PENDING_COUNT_AAF=$(kubectl get pods -n $ENVIRON | grep aaf- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_AAF" -gt 0 ]; then echo "down-aaf=${PENDING_COUNT_AAF}" fi PENDING_COUNT_CONSUL=$(kubectl get pods -n $ENVIRON | grep consul- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_CONSUL" -gt 0 ]; then echo "down-consul=${PENDING_COUNT_CONSUL}" fi PENDING_COUNT_MSB=$(kubectl get pods -n $ENVIRON | grep msb- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_MSB" -gt 0 ]; then echo "down-msb=${PENDING_COUNT_MSB}" fi PENDING_COUNT_DCAE=$(kubectl get pods -n $ENVIRON | grep dcaegen2- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_DCAE" -gt 0 ]; then echo "down-dcae=${PENDING_COUNT_DCAE}" fi PENDING_COUNT_CLI=$(kubectl get pods -n $ENVIRON | grep cli- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_CLI" -gt 0 ]; then echo "down-cli=${PENDING_COUNT_CLI}" fi PENDING_COUNT_MULTICLOUD=$(kubectl get pods -n $ENVIRON | grep multicloud- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_MULTICLOUD" -gt 0 ]; then echo "down-multicloud=${PENDING_COUNT_MULTICLOUD}" fi PENDING_COUNT_CLAMP=$(kubectl get pods -n $ENVIRON | grep clamp- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_CLAMP" -gt 0 ]; then echo "down-clamp=${PENDING_COUNT_CLAMP}" fi PENDING_COUNT_VNFSDK=$(kubectl get pods -n $ENVIRON | grep vnfsdk- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_VNFSDK" -gt 0 ]; then echo "down-vnfsdk=${PENDING_COUNT_VNFSDK}" fi PENDING_COUNT_UUI=$(kubectl get pods -n $ENVIRON | grep uui- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_UUI" -gt 0 ]; then echo "down-uui=${PENDING_COUNT_UUI}" fi PENDING_COUNT_VFC=$(kubectl get pods -n $ENVIRON | grep vfc- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_VFC" -gt 0 ]; then echo "down-vfc=${PENDING_COUNT_VFC}" fi PENDING_COUNT_KUBE2MSB=$(kubectl get pods -n $ENVIRON | grep kube2msb- | grep -E '0/|1/2' | wc -l) if [ "$PENDING_COUNT_KUBE2MSB" -gt 0 ]; then echo "down-kube2msb=${PENDING_COUNT_KUBE2MSB}" fi echo "pending containers=${PENDING_COUNT}" echo "${PENDING}" echo "check filebeat 2/2 count for ELK stack logging consumption" FILEBEAT=$(kubectl get pods --all-namespaces -a | grep 2/) echo "${FILEBEAT}" echo "sleep 5 min - to allow rest frameworks to finish" sleep 300 echo "List of ONAP Modules" LIST_ALL=$(kubectl get pods --all-namespaces -a --show-all ) echo "${LIST_ALL}" echo "run healthcheck 2 times to warm caches and frameworks so rest endpoints report properly - see OOM-447" echo "curl with aai cert to cloud-region PUT" curl -X PUT https://127.0.0.1:30233/aai/v11/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne --data "@aai-cloud-region-put.json" -H "authorization: Basic TW9kZWxMb2FkZXI6TW9kZWxMb2FkZXI=" -H "X-TransactionId:jimmy-postman" -H "X-FromAppId:AAI" -H "Content-Type:application/json" -H "Accept:application/json" --cacert aaiapisimpledemoopenecomporg_20171003.crt -k echo "get the cloud region back" curl -X GET https://127.0.0.1:30233/aai/v11/cloud-infrastructure/cloud-regions/ -H "authorization: Basic TW9kZWxMb2FkZXI6TW9kZWxMb2FkZXI=" -H "X-TransactionId:jimmy-postman" -H "X-FromAppId:AAI" -H "Content-Type:application/json" -H "Accept:application/json" --cacert aaiapisimpledemoopenecomporg_20171003.crt -k # OOM-484 - robot scripts moved cd oom/kubernetes/robot echo "run healthcheck prep 1" # OOM-722 adds namespace parameter if [ "$BRANCH" == "amsterdam" ]; then ./ete-k8s.sh health > ~/health1.out else ./ete-k8s.sh $ENVIRON health > ~/health1.out fi echo "sleep 5 min" sleep 300 echo "run healthcheck prep 2" if [ "$BRANCH" == "amsterdam" ]; then ./ete-k8s.sh health > ~/health2.out else ./ete-k8s.sh $ENVIRON health > ~/health2.out fi echo "run healthcheck for real - wait a further 5 min" sleep 300 if [ "$BRANCH" == "amsterdam" ]; then ./ete-k8s.sh health else ./ete-k8s.sh $ENVIRON health fi echo "run partial vFW" # sudo chmod 777 /dockerdata-nfs/onap # if [ "$BRANCH" == "amsterdam" ]; then # ./demo-k8s.sh init_robot # else # ./demo-k8s.sh $ENVIRON init # fi # if [ "$BRANCH" == "amsterdam" ]; then # ./demo-k8s.sh init # else # ./demo-k8s.sh $ENVIRON init # fi echo "report results" cd ../../../ echo "$(date)" #set +a } BRANCH= ENVIRON=onap APPLY_WORKAROUNDS=true DELETE_PREV_OOM=false REMOVE_OOM_AT_END=false CLONE_NEW_OOM=true while getopts ":u:b:e:c:d:w:r" PARAM; do case $PARAM in u) usage exit 1 ;; b) BRANCH=${OPTARG} ;; e) ENVIRON=${OPTARG} ;; c) CLONE_NEW_OOM=${OPTARG} ;; d) DELETE_PREV_OOM=${OPTARG} ;; w) APPLY_WORKAROUNDS=${OPTARG} ;; r) REMOVE_OOM_AT_END=${OPTARG} ;; ?) usage exit ;; esac done if [[ -z $BRANCH ]]; then usage exit 1 fi deploy_onap $BRANCH $ENVIRON $CLONE_NEW_OOM $DELETE_PREV_OOM $APPLY_WORKAROUNDS $REMOVE_OOM_AT_END printf "**** Done ****\n"