#!/bin/bash

# ============LICENSE_START==========================================
# ===================================================================
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ===================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END============================================
# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.

cd "$(dirname "$0")" || exit 1

CFG=../config
HOST_XREF=${CFG}/hostname-xref.txt
CLUSTER_H_TEMPLATE=${CFG}/hadoop-cluster-hosts-file.sh.tmpl
CLUSTER_HOSTS=${CFG}/hadoop-cluster-hosts-file.sh

HADOOP_CONF_FOLDER=${CFG}
HADOOP_SCRIPT_FOLDER=../pkgs/services
HADOOP_SERVICE_SETUP_FOLDER=../pkgs/services-setup

TEMPL_CDAP_CONF=${CFG}/cdap-config-template
CDAP_CONF_FOLDER=${CFG}/cdap-config

# Hack: force the system timezone to UTC on first run
if [ -e /etc/localtime ]; then
   rm /etc/localtime # remove it even if it is a symbolic link
fi
cp /usr/share/zoneinfo/UTC /etc/localtime
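# write the (now UTC) system time back to the hardware clock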
hwclock --systohc

# Hack: comment out any 127.0.1.1 entry in /etc/hosts on Ubuntu images
sed -i '/^#/! {/127\.0\.1\.1/ s/^/# /}' /etc/hosts
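# e.g. a hypothetical "127.0.1.1 myhost" line becomes "# 127.0.1.1 myhost";
# left in place, such entries can make Hadoop services resolve the local
# hostname to the loopback address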

if [ "--no-hosts" = "$1" ]; then 
   echo "Using test hosts"
   HOST_XREF=${HOST_XREF}.test
else
   bash ./utils/generate-hosts.sh > ${HOST_XREF}
fi
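# HOST_XREF should now map logical node names to real hostnames (e.g., a
# hypothetical line NODE0=cdap-host-0), in whatever format
# ./utils/generate-hosts.sh emits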

process_template() {
   template="$1"
   output_file="$2"
   xref_file="$3"
   python ./utils/replace-kv.py "${template}" "${output_file}" "${xref_file}"
}
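# Note: replace-kv.py is assumed to substitute KEY=VALUE pairs read from
# xref_file wherever KEY occurs in the template. A hypothetical invocation:
#   process_template ${CFG}/foo.tmpl ${CFG}/foo ${HOST_XREF}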

process_hadoop_service_scripts() {
   for x in "$@"; do
       srcscript=${HADOOP_CONF_FOLDER}/services-templates/"${x}"
       destscript=${HADOOP_SCRIPT_FOLDER}/"${x}"
       xref_file=${MAIN_CLUSTER_CONFIG_XREF}
       process_template "${srcscript}" "${destscript}" "${xref_file}"
   done
}

process_hadoop_service_setup_scripts() {
   for x in "$@"; do
       srcscript=${HADOOP_CONF_FOLDER}/services-setup-templates/"${x}"
       destscript=${HADOOP_SERVICE_SETUP_FOLDER}/"${x}"
       xref_file=${MAIN_CLUSTER_CONFIG_XREF}
       process_template "${srcscript}" "${destscript}" "${xref_file}"
   done
}
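# The two helpers above differ only in their folders: runtime service scripts
# come from services-templates, setup scripts from services-setup-templates.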


process_template "${CLUSTER_H_TEMPLATE}" "${CLUSTER_HOSTS}" "${HOST_XREF}"

# CLUSTER_HOSTS now records which hosts NODE0, NODE1, etc., refer to;
# CLUSTER_HADOOP_CONF (below) holds cluster-wide settings such as folder locations
CLUSTER_HADOOP_CONF=${CFG}/hadoop-cluster-conf-file.sh

MAIN_CLUSTER_CONFIG_XREF=${CFG}/main-conf.sh
# group them together
cat ${CLUSTER_HADOOP_CONF} ${CLUSTER_HOSTS} ${HOST_XREF} > ${MAIN_CLUSTER_CONFIG_XREF}
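# MAIN_CLUSTER_CONFIG_XREF now combines the cluster-wide settings, the NODE*
# definitions, and the raw host mapping, so this single xref file can drive
# all remaining template substitutions.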

# Create target configs from hadoop-config-templates
HCFG_TEMPL=${CFG}/hadoop-cluster-config-template

HCFG=${CFG}/hadoop-cluster-config

process_config_files() {
   sfolder="$1"
   mkdir -p "${HCFG}/${sfolder}"
   for x in ${HCFG_TEMPL}/${sfolder}/*; do
       if [ -d "$x" ]; then continue; fi  # skip subfolders
       item=${x#$HCFG_TEMPL/}
       template=${HCFG_TEMPL}/${item}
       output_file=${HCFG}/${item}
       process_template "${template}" "${output_file}" "${MAIN_CLUSTER_CONFIG_XREF}"
   done
}

process_config_folder() {
   sfolder="$1"
   dfolder="$2"
   mkdir -p "${dfolder}"
   for x in ${sfolder}/*; do
       if [ -d "$x" ]; then continue; fi  # skip subfolders
       item=${x#$sfolder/}
       template=${x}
       output_file=${dfolder}/${item}
       process_template "${template}" "${output_file}" "${MAIN_CLUSTER_CONFIG_XREF}"
   done
}
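# process_config_files resolves paths against the fixed HCFG_TEMPL/HCFG trees;
# process_config_folder takes explicit source and destination folders instead.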

process_config_files core_hadoop
process_config_files zookeeper
process_config_files hbase

process_config_folder ${TEMPL_CDAP_CONF} ${CDAP_CONF_FOLDER}
process_config_folder ${TEMPL_CDAP_CONF}/common ${CDAP_CONF_FOLDER}/common


# TODO: could we simply process every file in services-templates instead of listing them here?
process_hadoop_service_scripts \
   service-start.sh zookeeper.sh resource-manager.sh zookeeper-zkfc.sh \
   node-manager.sh namenode.sh datanode.sh secondary-namenode.sh  \
   job-history-server.sh hbase-master.sh hbase-regionserver.sh \
   utility-scripts.sh cdap.sh cdap-vm-services \
   boot-time-cdap-vm-N0.sh boot-time-cdap-vm-N1.sh boot-time-cdap-vm-N2.sh


# TODO: could we simply process every file in services-setup-templates instead of listing them here?
process_hadoop_service_setup_scripts \
   service-setup.sh namenode-setup.sh zookeeper-start.sh zookeeper-zkfc-start.sh \
   resource-manager-start.sh node-manager-start.sh datanode-start.sh \
   job-history-setup-01-as-root.sh job-history-setup-02-as-hdfs.sh \
   hbase-master-start.sh hbase-regionserver-start.sh \
   cdap-setup.sh cdap-start.sh

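# Make the generated scripts world-readable; presumably the various Hadoop
# service accounts need to read them at boot time.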
chmod -R o+r ${HADOOP_SCRIPT_FOLDER}/..