Diffstat (limited to 'cdap3vm/install-steps')
-rw-r--r--  cdap3vm/install-steps/01-generate-host-ids-configs.sh    145
-rw-r--r--  cdap3vm/install-steps/02-user-creation.sh                 43
-rw-r--r--  cdap3vm/install-steps/04-folder-creation.sh               71
-rwxr-xr-x  cdap3vm/install-steps/install-cdap-pkgs.sh                49
-rwxr-xr-x  cdap3vm/install-steps/install-hortonworks-hadoop.sh       61
-rw-r--r--  cdap3vm/install-steps/utils/boot-time-cdap-vm-N0.sh       33
-rw-r--r--  cdap3vm/install-steps/utils/boot-time-cdap-vm-N1.sh       35
-rw-r--r--  cdap3vm/install-steps/utils/boot-time-cdap-vm-N2.sh       33
-rw-r--r--  cdap3vm/install-steps/utils/cdap-nodetype-N0.sh           29
-rw-r--r--  cdap3vm/install-steps/utils/cdap-nodetype-N1.sh           34
-rw-r--r--  cdap3vm/install-steps/utils/cdap-nodetype-N2.sh           29
-rw-r--r--  cdap3vm/install-steps/utils/cdap-nodetype-NTEST.sh        30
-rw-r--r--  cdap3vm/install-steps/utils/cdap-vm-services              42
-rw-r--r--  cdap3vm/install-steps/utils/folder-creation-utils.sh     241
-rw-r--r--  cdap3vm/install-steps/utils/generate-hosts.sh             38
-rw-r--r--  cdap3vm/install-steps/utils/replace-kv.py                 63
16 files changed, 976 insertions, 0 deletions
diff --git a/cdap3vm/install-steps/01-generate-host-ids-configs.sh b/cdap3vm/install-steps/01-generate-host-ids-configs.sh
new file mode 100644
index 0000000..3a53078
--- /dev/null
+++ b/cdap3vm/install-steps/01-generate-host-ids-configs.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+cd "$(dirname "$0")"
+
+CFG=../config
+HOST_XREF=${CFG}/hostname-xref.txt
+CLUSTER_H_TEMPLATE=${CFG}/hadoop-cluster-hosts-file.sh.tmpl
+CLUSTER_HOSTS=${CFG}/hadoop-cluster-hosts-file.sh
+
+HADOOP_CONF_FOLDER=../config
+HADOOP_SCRIPT_FOLDER=../pkgs/services
+HADOOP_SERVICE_SETUP_FOLDER=../pkgs/services-setup
+
+TEMPL_CDAP_CONF=../config/cdap-config-template
+CDAP_CONF_FOLDER=../config/cdap-config
+
+# Hack to set system time to UTC first time
+if [ -e /etc/localtime ]; then
+ rm /etc/localtime # remove it even if it is a symbolic link
+fi
+cp /usr/share/zoneinfo/UTC /etc/localtime
+hwclock --systohc
+
+# Hack for 127.0.1.1 entry in /etc/hosts on Ubuntu images
+#
+sed '/^#/! {/127\.0\.1\.1/ s/^/# /}' -i /etc/hosts
+
+if [ "--no-hosts" = "$1" ]; then
+ echo "Using test hosts"
+ HOST_XREF=${HOST_XREF}.test
+else
+ bash ./utils/generate-hosts.sh > ${HOST_XREF}
+fi
+
+process_template() {
+ template="$1"
+ output_file="$2"
+ xref_file="$3"
+ python ./utils/replace-kv.py ${template} ${output_file} ${xref_file}
+}
+
+process_hadoop_service_scripts() {
+ for x in "$@"; do
+ srcscript=${HADOOP_CONF_FOLDER}/services-templates/"${x}"
+ destscript=${HADOOP_SCRIPT_FOLDER}/"${x}"
+ xref_file=${MAIN_CLUSTER_CONFIG_XREF}
+ process_template ${srcscript} ${destscript} ${xref_file}
+ done
+}
+
+process_hadoop_service_setup_scripts() {
+ for x in "$@"; do
+ srcscript=${HADOOP_CONF_FOLDER}/services-setup-templates/"${x}"
+ destscript=${HADOOP_SERVICE_SETUP_FOLDER}/"${x}"
+ xref_file=${MAIN_CLUSTER_CONFIG_XREF}
+ process_template ${srcscript} ${destscript} ${xref_file}
+ done
+}
+
+
+process_template ${CLUSTER_H_TEMPLATE} ${CLUSTER_HOSTS} ${HOST_XREF}
+
+# CLUSTER_HOSTS now has information on what NODE0, NODE1, etc., mean
+# CLUSTER_HADOOP_CONF has information on folders, etc.
+CLUSTER_HADOOP_CONF=${CFG}/hadoop-cluster-conf-file.sh
+
+MAIN_CLUSTER_CONFIG_XREF=${CFG}/main-conf.sh
+# group them together
+cat ${CLUSTER_HADOOP_CONF} ${CLUSTER_HOSTS} ${HOST_XREF} > ${MAIN_CLUSTER_CONFIG_XREF}
+
+# Create target configs from hadoop-config-templates
+HCFG_TEMPL=${CFG}/hadoop-cluster-config-template
+
+HCFG=${CFG}/hadoop-cluster-config
+
+process_config_files() {
+ sfolder="$1"
+ mkdir -p ${HCFG}/${sfolder}
+ for x in ${HCFG_TEMPL}/${sfolder}/*; do
+ if [ -d $x ]; then continue; fi # skip folder
+ item=${x#$HCFG_TEMPL/}
+ template=${HCFG_TEMPL}/${item}
+ output_file=${HCFG}/${item}
+ process_template ${template} ${output_file} ${MAIN_CLUSTER_CONFIG_XREF}
+ done
+}
+
+process_config_folder() {
+ sfolder="$1"
+ dfolder="$2"
+ mkdir -p ${dfolder}
+ for x in ${sfolder}/*; do
+ if [ -d $x ]; then continue; fi # skip folder
+ item=${x#$sfolder/}
+ template=${x}
+ output_file=${dfolder}/${item}
+ process_template ${template} ${output_file} ${MAIN_CLUSTER_CONFIG_XREF}
+ done
+}
+
+process_config_files core_hadoop
+process_config_files zookeeper
+process_config_files hbase
+
+process_config_folder ${TEMPL_CDAP_CONF} ${CDAP_CONF_FOLDER}
+process_config_folder ${TEMPL_CDAP_CONF}/common ${CDAP_CONF_FOLDER}/common
+
+
+# TODO: Could we simply process all files in the template folder for the service scripts?
+process_hadoop_service_scripts \
+ service-start.sh zookeeper.sh resource-manager.sh zookeeper-zkfc.sh \
+ node-manager.sh namenode.sh datanode.sh secondary-namenode.sh \
+ job-history-server.sh hbase-master.sh hbase-regionserver.sh \
+ utility-scripts.sh cdap.sh cdap-vm-services \
+ boot-time-cdap-vm-N0.sh boot-time-cdap-vm-N1.sh boot-time-cdap-vm-N2.sh
+
+
+# TODO: Could we simply process all files in the template folder for the service setup scripts?
+process_hadoop_service_setup_scripts \
+ service-setup.sh namenode-setup.sh zookeeper-start.sh zookeeper-zkfc-start.sh \
+ resource-manager-start.sh node-manager-start.sh datanode-start.sh \
+ job-history-setup-01-as-root.sh job-history-setup-02-as-hdfs.sh \
+    hbase-master-start.sh hbase-regionserver-start.sh \
+ cdap-setup.sh cdap-start.sh
+
+chmod -R o+r ${HADOOP_SCRIPT_FOLDER}/..
+
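The templating above delegates to utils/replace-kv.py (included at the end of this diff): every key defined in the xref file is substituted wherever it appears in the template. A minimal sketch of the mechanism, run from install-steps with hypothetical file names and a hypothetical key:

    echo '__DCAE_CDAP_NODE0__=cdap-node-0.example.com' > /tmp/xref.txt
    echo 'namenode host: __DCAE_CDAP_NODE0__' > /tmp/demo.tmpl
    python ./utils/replace-kv.py /tmp/demo.tmpl /tmp/demo.out /tmp/xref.txt
    cat /tmp/demo.out    # -> namenode host: cdap-node-0.example.com
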
diff --git a/cdap3vm/install-steps/02-user-creation.sh b/cdap3vm/install-steps/02-user-creation.sh
new file mode 100644
index 0000000..b5d6f03
--- /dev/null
+++ b/cdap3vm/install-steps/02-user-creation.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+# Create user accounts for CDAP-Hadoop (3VM minimal) solution
+# A line with a description and one user name creates user x1 (group x1)
+# An additional "other-groups" field also adds the user to supplementary groups (usermod -a -G y1,y2,...)
+# Supplementary groups must be comma-separated, with no spaces
+
+while read desc user othergrouplist; do
+    useradd $user -m
+    if [ -n "${othergrouplist}" ]; then
+        usermod -a -G ${othergrouplist} $user
+    fi
+done << __EOF__
+HADOOP hadoop
+HDFS hdfs hadoop
+HBase hbase hadoop
+YARN yarn hadoop
+MapReduce mapred hadoop
+ZooKeeper zookeeper hadoop
+CDAP cdap
+DCAE dcae hdfs
+__EOF__
+
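For the "HDFS hdfs hadoop" line of the here-document above, the loop expands to the equivalent of:

    useradd hdfs -m
    usermod -a -G hadoop hdfs
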
diff --git a/cdap3vm/install-steps/04-folder-creation.sh b/cdap3vm/install-steps/04-folder-creation.sh
new file mode 100644
index 0000000..59ca5cf
--- /dev/null
+++ b/cdap3vm/install-steps/04-folder-creation.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+\. ../config/main-conf.sh
+\. ./utils/folder-creation-utils.sh
+
+HADOOP_SERVICE_SETUP_FOLDER=../pkgs/services-setup
+
+NODETYPE=N$(get_cdap_host_id)
+NODETYPE=${1:-$NODETYPE}
+
+X=$NODETYPE
+if [[ "$X" != "N0" && "$X" != "N1" && "$X" != "N2" && "$X" != "NTEST" ]]; then
+    exit 1 # unsupported node type
+fi
+
+if [ "$NODETYPE" == "NTEST" ]; then TEST_ZOOKEEPER_ID=0; fi
+
+NODESTATE="$2" # default is inst, but we may have a spare type
+
+CFG=../config
+HCFG_TEMPL=${CFG}/hadoop-cluster-config-template
+
+TEMPL_CDAP_CONF=../config/cdap-config-template
+CDAP_CONF_FOLDER=../config/cdap-config
+
+HADOOP_CONF_FOLDER=../config/hadoop-cluster-config
+HADOOP_SCRIPT_FOLDER=../pkgs/services
+
+IS_INITIAL=1 # Fresh cluster
+MAXHOSTS=3
+
+if [[ -n "$NODESTATE" && "inst" != "$NODESTATE" ]]; then
+    IS_INITIAL= # not a fresh cluster
+fi
+
+process_generic_node
+
+# [DE248724] configure tmpfiles.d
+cp ../pkgs/ubuntu-files/cdap-hadoop-run.conf /usr/lib/tmpfiles.d/
+chmod 644 /usr/lib/tmpfiles.d/cdap-hadoop-run.conf
+
+cp ../pkgs/ubuntu-files/create_pid_dirs.sh "${__SERVICE_CONFIG_FOLDER__}"/
+chmod 755 "${__SERVICE_CONFIG_FOLDER__}"/create_pid_dirs.sh
+
+cp $HADOOP_SCRIPT_FOLDER/boot-time-cdap-vm-${NODETYPE}.sh "${__SERVICE_CONFIG_FOLDER__}"/boot-time-cdap-vm.sh
+cp $HADOOP_SCRIPT_FOLDER/cdap-vm-services /etc/init.d
+chmod +x /etc/init.d/cdap-vm-services
+
+# Ubuntu service enable:
+#
+update-rc.d cdap-vm-services defaults
+
+\. ./utils/cdap-nodetype-${NODETYPE}.sh
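NODETYPE defaults to "N" plus the last digit of the short hostname (get_cdap_host_id in utils/folder-creation-utils.sh) and can be overridden by the first argument. A usage sketch, assuming a hypothetical host named dcae-cdap02:

    bash 04-folder-creation.sh          # NODETYPE inferred as N2 on dcae-cdap02
    bash 04-folder-creation.sh NTEST    # explicit override for a single-node test setup
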
diff --git a/cdap3vm/install-steps/install-cdap-pkgs.sh b/cdap3vm/install-steps/install-cdap-pkgs.sh
new file mode 100755
index 0000000..c4c7ffa
--- /dev/null
+++ b/cdap3vm/install-steps/install-cdap-pkgs.sh
@@ -0,0 +1,49 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+export JAVA_HOME=/opt/app/java/jdk/jdk170
+PATH=$JAVA_HOME/bin:$PATH
+
+## Assumption: Hadoop is already installed and configured
+#
+# hdfs.namespace is /cdap and the default property hdfs.user is yarn
+#
+sudo su hdfs -c "JAVA_HOME=/opt/app/java/jdk/jdk170 hadoop fs -mkdir -p /cdap && hadoop fs -chown yarn /cdap"
+
+# create a tx.snapshot subdirectory
+#
+sudo su hdfs -c "JAVA_HOME=/opt/app/java/jdk/jdk170 hadoop fs -mkdir -p /cdap/tx.snapshot && hadoop fs -chown yarn /cdap/tx.snapshot"
+
+## Configure apt-get repo for CDAP 3.5.x:
+#
+echo "Configuring CDAP 3.5 apt-get repo..."
+sudo wget -O /etc/apt/sources.list.d/cask.list http://repository.cask.co/ubuntu/precise/amd64/cdap/3.5/cask.list
+
+## Add the Cask Public GPG Key:
+#
+echo "Adding the Cask public GPG key to apt..."
+wget -O - http://repository.cask.co/ubuntu/precise/amd64/cdap/3.5/pubkey.gpg | sudo apt-key add -
+
+## Update apt cache:
+#
+echo "apt-get update..."
+sudo apt-get -y update
+
+## Install CDAP packages:
+echo "installing cdap packages..."
+sudo JAVA_HOME=/opt/app/java/jdk/jdk170 apt-get -y install cdap-gateway cdap-kafka cdap-master cdap-security cdap-ui cdap-cli
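A quick post-install sanity check (a sketch, assuming the same JAVA_HOME as above):

    sudo su hdfs -c "JAVA_HOME=/opt/app/java/jdk/jdk170 hadoop fs -ls /cdap"
    dpkg -l 'cdap-*' | grep '^ii'    # all six CDAP packages should show as installed
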
diff --git a/cdap3vm/install-steps/install-hortonworks-hadoop.sh b/cdap3vm/install-steps/install-hortonworks-hadoop.sh
new file mode 100755
index 0000000..9e73eeb
--- /dev/null
+++ b/cdap3vm/install-steps/install-hortonworks-hadoop.sh
@@ -0,0 +1,61 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+export JAVA_HOME=/opt/app/java/jdk/jdk170
+
+## Configure apt-get repo for HortonWorks:
+#
+echo "Configuring HortonWorks apt-get repo..."
+sudo wget http://public-repo-1.hortonworks.com/HDP/ubuntu14/2.x/updates/2.4.3.0/hdp.list -O /etc/apt/sources.list.d/hdp.list
+
+## Add the HortonWorks public GPG key:
+#
+echo "Adding the HortonWorks public GPG key to apt..."
+sudo apt-key adv --keyserver keyserver.ubuntu.com --recv 07513CAD
+
+## Update apt cache:
+#
+echo "apt-get update..."
+sudo apt-get -y update
+
+## List Hadoop packages:
+#
+sudo apt-cache search hadoop
+sudo apt-cache search hbase
+sudo apt-cache search zookeeper
+
+## Install HortonWorks Hadoop packages:
+#
+# sudo JAVA_HOME=/opt/app/java/jdk/jdk170 apt-get install hadoop\*
+
+# sudo JAVA_HOME=/opt/app/java/jdk/jdk170 apt-get --print-uris install \
+
+sudo JAVA_HOME=/opt/app/java/jdk/jdk170 apt-get -y install \
+ hadoop hadoop-client hadoop-hdfs hadoop-hdfs-datanode hadoop-hdfs-namenode \
+ hadoop-hdfs-zkfc hadoop-mapreduce hadoop-mapreduce-historyserver \
+ hadoop-yarn hadoop-yarn-nodemanager \
+ hbase hbase-master hbase-regionserver \
+ zookeeper libhdfs0
+
+## Fix file permissions for domain sockets
+#
+sudo chown hdfs:hadoop /var/lib/hadoop-hdfs
+sudo chown mapred:hadoop /var/lib/hadoop-mapreduce
+sudo chown yarn:hadoop /var/lib/hadoop-yarn
+sudo chmod 775 /var/lib/hadoop-mapreduce
+
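To confirm the ownership and mode fixes above took effect:

    stat -c '%U:%G %a %n' /var/lib/hadoop-hdfs /var/lib/hadoop-mapreduce /var/lib/hadoop-yarn
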
diff --git a/cdap3vm/install-steps/utils/boot-time-cdap-vm-N0.sh b/cdap3vm/install-steps/utils/boot-time-cdap-vm-N0.sh
new file mode 100644
index 0000000..d55f6ae
--- /dev/null
+++ b/cdap3vm/install-steps/utils/boot-time-cdap-vm-N0.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+# Customization for this node -- after generic node is processed
+
+MAINDIR=/etc/hadoop/service_scripts/
+
+\. $MAINDIR/utility-scripts.sh
+
+bash $MAINDIR/zookeeper.sh start
+bash $MAINDIR/datanode.sh
+bash $MAINDIR/node-manager.sh
+
+wait_for_hbase_shell_OK
+bash $MAINDIR/hbase-regionserver.sh
+
diff --git a/cdap3vm/install-steps/utils/boot-time-cdap-vm-N1.sh b/cdap3vm/install-steps/utils/boot-time-cdap-vm-N1.sh
new file mode 100644
index 0000000..d8affb9
--- /dev/null
+++ b/cdap3vm/install-steps/utils/boot-time-cdap-vm-N1.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+# Customization for this node -- after generic node is processed
+
+MAINDIR=/etc/hadoop/service_scripts/
+
+\. $MAINDIR/utility-scripts.sh
+
+bash $MAINDIR/zookeeper.sh start
+bash $MAINDIR/datanode.sh
+bash $MAINDIR/resource-manager.sh
+bash $MAINDIR/node-manager.sh
+bash $MAINDIR/job-history-server.sh
+
+wait_for_namenode
+bash $MAINDIR/hbase-master.sh
+bash $MAINDIR/hbase-regionserver.sh
diff --git a/cdap3vm/install-steps/utils/boot-time-cdap-vm-N2.sh b/cdap3vm/install-steps/utils/boot-time-cdap-vm-N2.sh
new file mode 100644
index 0000000..eb9c9f3
--- /dev/null
+++ b/cdap3vm/install-steps/utils/boot-time-cdap-vm-N2.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+# Customization for this node -- after generic node is processed
+
+MAINDIR=/etc/hadoop/service_scripts/
+
+\. $MAINDIR/utility-scripts.sh
+
+bash $MAINDIR/zookeeper.sh start
+bash $MAINDIR/datanode.sh
+bash $MAINDIR/node-manager.sh
+
+wait_for_hbase_shell_OK
+bash $MAINDIR/hbase-regionserver.sh
+bash $MAINDIR/cdap.sh
diff --git a/cdap3vm/install-steps/utils/cdap-nodetype-N0.sh b/cdap3vm/install-steps/utils/cdap-nodetype-N0.sh
new file mode 100644
index 0000000..f6140a4
--- /dev/null
+++ b/cdap3vm/install-steps/utils/cdap-nodetype-N0.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+# Customization for this node -- after generic node is processed
+
+inst_namenode # will do datanode
+inst_yarn_nodemanager
+
+wait_for_hbase_shell_OK
+inst_hbase_regionserver
+
+# inst_cdap # inst_cdap will start CDAP services
diff --git a/cdap3vm/install-steps/utils/cdap-nodetype-N1.sh b/cdap3vm/install-steps/utils/cdap-nodetype-N1.sh
new file mode 100644
index 0000000..4e87124
--- /dev/null
+++ b/cdap3vm/install-steps/utils/cdap-nodetype-N1.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+# Customization for this node -- after generic node is processed
+
+inst_datanode
+inst_yarn_resourcemanager
+inst_yarn_nodemanager
+inst_job_history_server
+
+wait_for_namenode
+sleep 20 # for namenode to create required folders; also hbase master needs namenode
+
+inst_hbase_master
+inst_hbase_regionserver
+
+# inst_cdap # inst_cdap will start CDAP services
diff --git a/cdap3vm/install-steps/utils/cdap-nodetype-N2.sh b/cdap3vm/install-steps/utils/cdap-nodetype-N2.sh
new file mode 100644
index 0000000..52ef5fc
--- /dev/null
+++ b/cdap3vm/install-steps/utils/cdap-nodetype-N2.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+# Customization for this node -- after generic node is processed
+
+inst_datanode
+inst_yarn_nodemanager
+
+wait_for_hbase_shell_OK
+inst_hbase_regionserver
+
+inst_cdap
diff --git a/cdap3vm/install-steps/utils/cdap-nodetype-NTEST.sh b/cdap3vm/install-steps/utils/cdap-nodetype-NTEST.sh
new file mode 100644
index 0000000..0ae4b3d
--- /dev/null
+++ b/cdap3vm/install-steps/utils/cdap-nodetype-NTEST.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+# Customization for this node -- after generic node is processed
+
+inst_namenode # will do datanode
+inst_yarn_nodemanager
+
+inst_cdap
+inst_yarn_resourcemanager
+inst_job_history_server
+inst_hbase_master
+inst_hbase_regionserver
diff --git a/cdap3vm/install-steps/utils/cdap-vm-services b/cdap3vm/install-steps/utils/cdap-vm-services
new file mode 100644
index 0000000..281cd36
--- /dev/null
+++ b/cdap3vm/install-steps/utils/cdap-vm-services
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+# description: Starts and stops the Hadoop and CDAP services for this node
+#
+### BEGIN INIT INFO
+# Provides: cdap-vm-services
+# Short-Description: Hadoop and CDAP services for this CDAP node
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Required-Start: $syslog $remote_fs
+# Required-Stop: $syslog $remote_fs
+# Should-Start:
+# Should-Stop:
+### END INIT INFO
+
+MAINDIR=/etc/hadoop/service-scripts/
+
+if [ "start" = "$1" ]; then
+ bash $MAINDIR/boot-time-cdap-vm.sh
+ ( cd /opt/app/dcae-controller-service-cdap-cluster-manager ; sudo su dcae -c "JAVA_HOME=/opt/app/java/jdk/jdk170 bin/manager.sh start" )
+fi
+
+
+
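Only the start action is implemented; the script is registered for boot via update-rc.d in 04-folder-creation.sh. Manual invocation on Ubuntu:

    sudo service cdap-vm-services start
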
diff --git a/cdap3vm/install-steps/utils/folder-creation-utils.sh b/cdap3vm/install-steps/utils/folder-creation-utils.sh
new file mode 100644
index 0000000..8a8f82a
--- /dev/null
+++ b/cdap3vm/install-steps/utils/folder-creation-utils.sh
@@ -0,0 +1,241 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+copy_hadoop_conf_files() {
+ srcfolder=${HADOOP_CONF_FOLDER}/"$1"
+ destfolder="$2"
+ cp -a $srcfolder/* $destfolder/
+}
+
+copy_cdap_conf_files() {
+ srcfolder=${CDAP_CONF_FOLDER}
+ destfolder="${__CDAP_CONF_DIR__}"
+ cp -a $srcfolder/cdap-* $srcfolder/log* $destfolder/
+ # Hack for a common.sh file -- TODO: Make it more general
+ cp -a $srcfolder/common/common.sh ${__CDAP_INST_FOLDER__}/ui/bin/common.sh
+}
+
+
+setup_hadoop_service_scripts() {
+ for x in "$@"; do
+ srcscript=${HADOOP_SCRIPT_FOLDER}/"${x}"
+ destscript="${__SERVICE_CONFIG_FOLDER__}/${x}"
+ cp -a $srcscript $destscript
+ done
+}
+
+run_service_setup_script() {
+ script="$1"
+ user="$2"
+ runner=${HADOOP_SERVICE_SETUP_FOLDER}/service-setup.sh
+ srcscript=${HADOOP_SERVICE_SETUP_FOLDER}/"${script}"
+    bash ${runner} ${script} ${user} # pass the bare script name, not its full path
+}
+
+create_folder() {
+ # Create a folder with a group and mode
+ name=$1 # if there are spaces, there will be multiple folders
+ user="$2"
+ group="$3"
+ mode="$4"
+
+ mkdir -p $name # if there are spaces, there will be multiple folders
+ chown -R "$user":"$group" $name
+ chmod -R "$mode" $name
+}
+
+inst_namenode() {
+ if [ -n "$IS_INITIAL" ]; then
+ rm -rf ${__DFS_NAME_DIR__} # TODO: Move away from force delete?
+ create_folder ${__DFS_NAME_DIR__} ${__HDFS_USER__} ${__HADOOP_GROUP__} 755
+ run_service_setup_script namenode-setup.sh ${__HDFS_USER__}
+ setup_hadoop_service_scripts namenode.sh
+ else
+        exit 1; # We do not know what to do
+ fi
+}
+
+inst_secondary_namenode() {
+ if [ -n "$IS_INITIAL" ]; then
+ create_folder ${__FS_CHECKPOINT_DIR__} ${__HDFS_USER__} ${__HADOOP_GROUP__} 755
+ setup_hadoop_service_scripts secondary-namenode.sh
+ else
+        exit 1; # We do not know what to do
+ fi
+}
+
+inst_yarn_basic() {
+ create_folder ${__YARN_LOCAL_DIR__} ${__YARN_USER__} ${__HADOOP_GROUP__} 755
+ create_folder ${__YARN_LOCAL_LOG_DIR__} ${__YARN_USER__} ${__HADOOP_GROUP__} 755
+}
+
+inst_datanode_basic() {
+ create_folder ${__HDFS_LOG_DIR__} ${__HDFS_USER__} ${__HADOOP_GROUP__} 755
+ rm -rf ${__DFS_DATA_DIR__} # TODO: Move away from force delete?
+ create_folder ${__DFS_DATA_DIR__} ${__HDFS_USER__} ${__HADOOP_GROUP__} 750
+ setup_hadoop_service_scripts datanode.sh
+}
+
+inst_datanode() {
+ run_service_setup_script datanode-start.sh ${__HDFS_USER__}
+}
+
+inst_hbase_basic() {
+ create_folder ${__HBASE_LOG_DIR__} ${__HBASE_USER__} ${__HADOOP_GROUP__} 750
+ create_folder ${__HBASE_PID_DIR__} ${__HBASE_USER__} ${__HADOOP_GROUP__} 750
+ rm -rf ${__HBASE_CONF_DIR__}
+ create_folder ${__HBASE_CONF_DIR__} ${__HBASE_USER__} ${__HADOOP_GROUP__} 755
+ copy_hadoop_conf_files hbase ${__HBASE_CONF_DIR__}
+}
+
+inst_hbase_master() {
+ run_service_setup_script hbase-master-start.sh ${__HBASE_USER__}
+ setup_hadoop_service_scripts hbase-master.sh
+}
+
+inst_hbase_regionserver() {
+ run_service_setup_script hbase-regionserver-start.sh ${__HBASE_USER__}
+ setup_hadoop_service_scripts hbase-regionserver.sh
+}
+
+inst_core_files() {
+ # For all nodes
+ # YARN logs
+ create_folder ${__YARN_LOG_DIR__} ${__YARN_USER__} ${__HADOOP_GROUP__} 755
+ # HDFS PID folder
+ create_folder ${__HDFS_PID_DIR__} ${__HDFS_USER__} ${__HADOOP_GROUP__} 755
+ # YARN Node Manager Recovery Directory
+ create_folder ${__YARN_NODEMANAGER_RECOVERY_DIR__} ${__YARN_USER__} ${__HADOOP_GROUP__} 755
+ # YARN PID folder
+ create_folder ${__YARN_PID_DIR__} ${__YARN_USER__} ${__HADOOP_GROUP__} 755
+ # JobHistory server logs
+ create_folder ${__MAPRED_LOG_DIR__} ${__MAPRED_USER__} ${__HADOOP_GROUP__} 755
+ # JobHistory PID folder
+ create_folder ${__MAPRED_PID_DIR__} ${__MAPRED_USER__} ${__HADOOP_GROUP__} 755
+ # hadoop conf dir
+ rm -rf ${__HADOOP_CONF_DIR__}
+ create_folder ${__HADOOP_CONF_DIR__} ${__HDFS_USER__} ${__HADOOP_GROUP__} 755
+ # dfs.exclude -- before things are created
+ touch ${__HADOOP_CONF_DIR__}/dfs.exclude
+ copy_hadoop_conf_files core_hadoop ${__HADOOP_CONF_DIR__}
+}
+
+inst_yarn_nodemanager() {
+ run_service_setup_script node-manager-start.sh ${__YARN_USER__}
+ setup_hadoop_service_scripts node-manager.sh
+}
+
+inst_yarn_resourcemanager() {
+ run_service_setup_script resource-manager-start.sh ${__YARN_USER__}
+ setup_hadoop_service_scripts resource-manager.sh
+}
+
+inst_job_history_server() {
+ run_service_setup_script job-history-setup-01-as-root.sh # no need for username then
+ run_service_setup_script job-history-setup-02-as-hdfs.sh ${__HDFS_USER__}
+ setup_hadoop_service_scripts job-history-server.sh
+}
+
+inst_cdap() {
+ # note: cluster-prep is done along with namenode
+ run_service_setup_script cdap-setup.sh # as root
+ setup_hadoop_service_scripts cdap.sh
+ copy_cdap_conf_files
+ run_service_setup_script cdap-start.sh # as root
+}
+
+inst_zookeeper() {
+ # Zookeeper data folder
+ create_folder ${__ZOOKEEPER_DATA_DIR__} ${__ZOOKEEPER_USER__} ${__HADOOP_GROUP__} 755
+ # Zookeeper logs
+ create_folder ${__ZOOKEEPER_LOG_DIR__} ${__ZOOKEEPER_USER__} ${__HADOOP_GROUP__} 755
+ create_folder ${__ZKFC_LOG_DIR__} ${__HDFS_USER__} ${__HADOOP_GROUP__} 755
+ # Zookeeper PID folder
+ create_folder ${__ZOOKEEPER_PID_DIR__} ${__ZOOKEEPER_USER__} ${__HADOOP_GROUP__} 755
+ # Put the Zookeeper ID into its folder
+ get_zookeeper_id > ${__ZOOKEEPER_DATA_DIR__}/myid
+ # Clean up Zookeeper conf folder
+ rm -rf ${__ZOOKEEPER_CONF_DIR__}
+ create_folder ${__ZOOKEEPER_CONF_DIR__} ${__ZOOKEEPER_USER__} ${__HADOOP_GROUP__} 755
+ chmod 755 ${__ZOOKEEPER_CONF_DIR__}/.. # Parent folder of zookeeper
+ # Copy zookeeper files
+ copy_hadoop_conf_files zookeeper ${__ZOOKEEPER_CONF_DIR__}
+ run_service_setup_script zookeeper-start.sh ${__ZOOKEEPER_USER__}
+ run_service_setup_script zookeeper-zkfc-start.sh ${__HDFS_USER__}
+}
+
+get_cdap_host_id() {
+ # we will set this based on the hostname
+ base=$(hostname -s)
+ base=$(echo $base | sed 's/^.*[^0-9]\([0-9][0-9]*\)/\1/g')
+ base=${base: -1} # get last digit. TODO: Need to make it scale?
+ echo $base
+}
+
+get_zookeeper_id() {
+ echo $(get_cdap_host_id)
+}
+
+get_zookeeper_id_single_digit() {
+ base=$(get_cdap_host_id)
+ echo ${base: -1}
+}
+
+process_generic_node() {
+
+ create_folder ${__SERVICE_CONFIG_FOLDER__} root root 755
+
+ inst_core_files # config files, creating, copying etc.
+ setup_hadoop_service_scripts utility-scripts.sh
+ setup_hadoop_service_scripts service-start.sh
+ setup_hadoop_service_scripts zookeeper.sh
+ setup_hadoop_service_scripts zookeeper-zkfc.sh
+
+ inst_zookeeper
+ inst_datanode_basic
+ inst_hbase_basic
+ inst_yarn_basic
+}
+
+wait_for_remote_service() {
+ remote_host="$1"
+ remote_port="$2"
+ sleep_time=${3:-30} # default of 30 seconds between retries
+
+ # keep checking remote_host's remote_port every sleep_time seconds till we get a connection
+ while ( ! nc $remote_host $remote_port < /dev/null ); do sleep $sleep_time ; done
+}
+
+wait_for_namenode() {
+ # keep checking namenode's port 8020 till it is up -- do it every 30 seconds
+ wait_for_remote_service ${__HDP_NAMENODE__} 8020 30
+}
+
+wait_for_hbase_master_start() {
+ # keep checking hbase master's port 16000 till it is up -- do it every 30 seconds
+ wait_for_remote_service ${__HDP_HBASE_MASTER__} 16000 30
+}
+
+wait_for_hbase_shell_OK() {
+ # run hbase shell and see if we connect to hbase... Better than waiting for ports
+ while ( echo list | hbase shell 2>&1 | grep ^ERROR > /dev/null ); do
+ sleep 30
+ done
+}
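wait_for_remote_service simply retries a TCP connect with nc until it succeeds, so any of the wait_for_* helpers can be reproduced directly. A usage sketch against a hypothetical host:

    # block until the namenode RPC port answers, probing every 10 seconds
    wait_for_remote_service my-namenode.example.com 8020 10
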
diff --git a/cdap3vm/install-steps/utils/generate-hosts.sh b/cdap3vm/install-steps/utils/generate-hosts.sh
new file mode 100644
index 0000000..b080a1e
--- /dev/null
+++ b/cdap3vm/install-steps/utils/generate-hosts.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+MAXHOSTID=2
+
+fullhostname=$(hostname -f)
+basehostname=${fullhostname%%.*}
+restname=${fullhostname#*.}
+
+basenum=$(echo $basehostname | sed 's/^.*[^0-9]\([0-9][0-9]*\)/\1/g')
+basenum=$(echo $basenum | sed 's/^0*//') # remove leading zeros
+
+if [[ "${basenum}" -ge 0 && "${basenum}" -le ${MAXHOSTS} ]]; then
+ for x in $(seq 0 $MAXHOSTID); do
+ echo "__DCAE_CDAP_NODE${x}__=${basehostname::-1}${x}"
+ # echo "__DCAE_CDAP_NODE${x}__=${basehostname::-1}${x}.${restname}"
+ # echo "_CLUSTER_SERVER_PREFIX__0${x}=${basehostname::-1}${x}.${restname}"
+ done
+fi
+
+
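Given MAXHOSTID=2, the script emits one key per cluster node by rewriting the last character of the short hostname. On a hypothetical host dcae-cdap01 the output would be:

    __DCAE_CDAP_NODE0__=dcae-cdap00
    __DCAE_CDAP_NODE1__=dcae-cdap01
    __DCAE_CDAP_NODE2__=dcae-cdap02
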
diff --git a/cdap3vm/install-steps/utils/replace-kv.py b/cdap3vm/install-steps/utils/replace-kv.py
new file mode 100644
index 0000000..acd9782
--- /dev/null
+++ b/cdap3vm/install-steps/utils/replace-kv.py
@@ -0,0 +1,63 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.
+
+from __future__ import print_function
+import os
+import re
+import sys
+
+"""
+Replace patterns in a file with the corresponding replacements.
+
+Args: OriginalFile OutFile XRefFile
+"""
+
+
+def process_kv_file(fname):
+ """
+    Read a simple properties file (X=Y format); the file may contain comment lines and blanks. Return a dict.
+    Comment lines match '^\s*#', while blank lines match '^\s*$'.
+    Keys cannot contain '=', but values can.
+ """
+ ignore_pattern = re.compile(r'^\s*(#|$)')
+ with open(fname) as fid:
+ all_lines = [ line.strip() for line in fid if not re.match(ignore_pattern, line) ]
+ return dict( line.partition('=')[::2] for line in all_lines )
+
+
+def replace_kv(fname, outfname, *xref_files):
+ """
+    Read a file and perform multiple search-and-replace passes using the key-value pairs from the xref files.
+    Keys must be plain strings without regex metacharacters, since they are joined into one alternation pattern.
+ """
+ xref = {}
+ for xref_file in xref_files:
+ xref.update(process_kv_file(xref_file))
+ pattern = re.compile("|".join(xref.keys()))
+ with open(outfname, 'w') as outf:
+ with open(fname) as fid:
+ all_text = fid.read()
+ outf.write(pattern.sub(lambda m: xref[m.group(0)], all_text))
+
+
+if __name__ == "__main__":
+    if len(sys.argv) >= 4:  # input file, output file, and at least one xref file
+ replace_kv(*sys.argv[1:])
+ else:
+ print("Usage: {} <{}> <{}> <{}> [{}]".format(
+ sys.argv[0], "InputFile", "OutPutFile", "XREF-DictFile[s]", "..."), file=sys.stderr);
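
process_kv_file splits each line on the first '=' only (str.partition), so values may themselves contain '='. A hypothetical xref file and the dict it yields:

    # cluster layout
    __NODE0__=host-0
    __JAVA_OPTS__=-Xmx1g -Dkey=value

    -> {'__NODE0__': 'host-0', '__JAVA_OPTS__': '-Xmx1g -Dkey=value'}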