From 94a4ef60e82e414fc04ba290c54f07fb24e99ca5 Mon Sep 17 00:00:00 2001 From: Lusheng Ji Date: Mon, 16 Oct 2017 22:14:08 -0400 Subject: Upload raw files recuresively with path Issue-Id: DCAEGEN2-128 Change-Id: I05c7ab91828224ccf87326c8980c2459290c56dc Signed-off-by: Lusheng Ji --- mvn-phase-lib.sh | 11 +- mvn-phase-script.sh | 4 +- scripts/cdap-init.sh | 392 ------------------------------ scripts/cloud_init/cdap-init.sh | 392 ++++++++++++++++++++++++++++++ scripts/cloud_init/instconsulagentub16.sh | 51 ++++ scripts/instconsulagentub16.sh | 51 ---- 6 files changed, 455 insertions(+), 446 deletions(-) delete mode 100644 scripts/cdap-init.sh create mode 100644 scripts/cloud_init/cdap-init.sh create mode 100644 scripts/cloud_init/instconsulagentub16.sh delete mode 100644 scripts/instconsulagentub16.sh diff --git a/mvn-phase-lib.sh b/mvn-phase-lib.sh index b9e764c..8854944 100755 --- a/mvn-phase-lib.sh +++ b/mvn-phase-lib.sh @@ -241,6 +241,8 @@ upload_raw_file() OUTPUT_FILE_TYPE='application/json' elif [ "$EXT" == 'sh' ]; then OUTPUT_FILE_TYPE='text/x-shellscript' + elif [ "$EXT" == 'py' ]; then + OUTPUT_FILE_TYPE='text/x-python' elif [ "$EXT" == 'gz' ]; then OUTPUT_FILE_TYPE='application/gzip' elif [ "$EXT" == 'wgn' ]; then @@ -263,7 +265,7 @@ upload_raw_file() fi echo "Sending ${OUTPUT_FILE} to Nexus: ${SEND_TO}" - curl -vkn --netrc-file "${NETRC}" --upload-file "${OUTPUT_FILE}" -X PUT -H "Content-Type: $OUTPUT_FILE_TYPE" "${SEND_TO}/${OUTPUT_FILE}-${MVN_PROJECT_VERSION}-${TIMESTAMP}" + #curl -vkn --netrc-file "${NETRC}" --upload-file "${OUTPUT_FILE}" -X PUT -H "Content-Type: $OUTPUT_FILE_TYPE" "${SEND_TO}/${OUTPUT_FILE}-${MVN_PROJECT_VERSION}-${TIMESTAMP}" curl -vkn --netrc-file "${NETRC}" --upload-file "${OUTPUT_FILE}" -X PUT -H "Content-Type: $OUTPUT_FILE_TYPE" "${SEND_TO}/${OUTPUT_FILE}-${MVN_PROJECT_VERSION}" curl -vkn --netrc-file "${NETRC}" --upload-file "${OUTPUT_FILE}" -X PUT -H "Content-Type: $OUTPUT_FILE_TYPE" "${SEND_TO}/${OUTPUT_FILE}" } @@ -290,6 +292,13 @@ upload_files_of_extension() upload_raw_file "$F" done } +upload_files_of_extension_recursively() +{ + FILES=$(find . -name "*.$1") + for F in $FILES ; do + upload_raw_file "$F" + done +} generate_pypirc_then_publish() diff --git a/mvn-phase-script.sh b/mvn-phase-script.sh index 945fccc..db190e0 100755 --- a/mvn-phase-script.sh +++ b/mvn-phase-script.sh @@ -94,8 +94,8 @@ deploy) ;; scripts) # upload all sh file under the root of module - upload_files_of_extension sh - upload_files_of_extension py + upload_files_of_extension_recursively sh + upload_files_of_extension_recursively py ;; *) echo "====> unknown mvn project module" diff --git a/scripts/cdap-init.sh b/scripts/cdap-init.sh deleted file mode 100644 index d3c1b08..0000000 --- a/scripts/cdap-init.sh +++ /dev/null @@ -1,392 +0,0 @@ -# ============LICENSE_START==================================================== -# org.onap.dcae -# ============================================================================= -# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved. -# ============================================================================= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============LICENSE_END====================================================== - -set -x -# -# get configuration -# -CODE_SOURCE=$1 -CODE_VERSION=$2 -CLUSTER_INDEX=$3 -CLUSTER_SIZE=$4 -CLUSTER_FQDNS=$5 -CLUSTER_LOCAL_IPS=$6 -CLUSTER_FLOATING_IPS=$7 -DATACENTER=$8 -REGISTERED_NAME=$9 -export JAVA_HOME=/usr/lib/jvm/default-java -md5sum /root/.sshkey/id_rsa | awk '{ print $1 }' >/root/.mysqlpw -chmod 400 /root/.mysqlpw -# -# enable outside apt repositories -# -wget -qO- http://public-repo-1.hortonworks.com/HDP/ubuntu16/2.x/updates/2.6.0.3/hdp.list >/etc/apt/sources.list.d/hdp.list -wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/cask.list >/etc/apt/sources.list.d/cask.list -wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/pubkey.gpg | apt-key add - -apt-key adv --recv-keys --keyserver keyserver.ubuntu.com B9733A7A07513CAD -apt-get update -# -# install software from apt repositories -# -apt-get install -y default-jdk hadoop-hdfs hadoop-mapreduce hive hbase libsnappy-dev liblzo2-dev hadooplzo spark-master spark-python zip unzip -usermod -a -G hadoop hive -if [ $CLUSTER_INDEX -lt 3 ] -then - apt-get install -y zookeeper-server - cat <>/etc/zookeeper/conf/zookeeper-env.sh -export JAVA_HOME=/usr/lib/jvm/default-java -export ZOOCFGDIR=/etc/zookeeper/conf -export ZOO_LOG_DIR=/var/log/zookeeper -export ZOOPIDFILE=/var/run/zookeeper/zookeeper_server.pid -!EOF - mkdir -p /var/lib/zookeeper - chown zookeeper:zookeeper /var/lib/zookeeper - cp /usr/hdp/current/zookeeper-server/etc/init.d/zookeeper-server /etc/init.d/. 
- update-rc.d zookeeper-server defaults - service zookeeper-server start -fi -if [ $CLUSTER_INDEX -eq 2 ] -then - debconf-set-selections </usr/hdp/current/spark-client/conf/java-opts -echo "export OPTS=\"\${OPTS} -Dhdp.version=$HDPVER\"" >>/etc/cdap/conf/cdap-env.sh -cat >/etc/profile.d/hadoop.sh <<'!EOF' -HADOOP_PREFIX=/usr/hdp/current/hadoop-client -HADOOP_YARN_HOME=/usr/hdp/current/hadoop-yarn-nodemanager -HADOOP_HOME=/usr/hdp/current/hadoop-client -HADOOP_COMMON_HOME=$HADOOP_HOME -HADOOP_CONF_DIR=/etc/hadoop/conf -HADOOP_HDFS_HOME=/usr/hdp/current/hadoop-hdfs-namenode -HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec -YARN_LOG_DIR=/usr/lib/hadoop-yarn/logs -HADOOP_LOG_DIR=/usr/lib/hadoop/logs -JAVA_HOME=/usr/lib/jvm/default-java -JAVA=$JAVA_HOME/bin/java -PATH=$PATH:$HADOOP_HOME/bin -HBASE_LOG_DIR=/usr/lib/hbase/logs -HADOOP_MAPRED_LOG_DIR=/usr/lib/hadoop-mapreduce/logs -HBASE_CONF_DIR=/etc/hbase/conf -export HADOOP_PREFIX HADOOP_HOME HADOOP_COMMON_HOME HADOOP_CONF_DIR HADOOP_HDFS_HOME JAVA_HOME PATH HADOOP_LIBEXEC_DIR JAVA JARN_LOG_DIR HADOOP_LOG_DIR HBASE_LOG_DIR HADOOP_MAPRED_LOG_DIR HBASE_CONF_DIR -!EOF -chmod 755 /etc/profile.d/hadoop.sh -cat >/etc/hadoop/conf/hadoop-env.sh -mv /root/.sshkey /var/lib/hadoop-hdfs/.ssh -cp /var/lib/hadoop-hdfs/.ssh/id_rsa.pub /var/lib/hadoop-hdfs/.ssh/authorized_keys ->/etc/hadoop/conf/dfs.exclude ->/etc/hadoop/conf/yarn.exclude -chown -R hdfs:hadoop /var/lib/hadoop-hdfs/.ssh /hadoop /usr/lib/hadoop -chown -R yarn:hadoop /usr/lib/hadoop-yarn /hadoop/yarn -chown -R mapred:hadoop /usr/lib/hadoop-mapreduce -chown -R hbase:hbase /usr/lib/hbase -chmod 700 /var/lib/hadoop-hdfs/.ssh -chmod 600 /var/lib/hadoop-hdfs/.ssh/* -sed -i -e '/maxClientCnxns/d' /etc/zookeeper/conf/zoo.cfg - -cat >/tmp/init.py <\n\n" - for n in m.keys(): - a = a + "\n \n {n}\n {v}\n ".format(n=n,v=m[n]) - a = a + "\n\n" - with open(f, 'w') as xml: - xml.write(a) -pxc('/etc/hadoop/conf/core-site.xml', { - 'fs.defaultFS':'hdfs://cl' - }) -pxc('/etc/hadoop/conf/hdfs-site.xml', { - 'dfs.namenode.datanode.registration.ip-hostname-check':'false', - 'dfs.namenode.name.dir':'/hadoop/hdfs/namenode', - 'dfs.hosts.exclude':'/etc/hadoop/conf/dfs.exclude', - 'dfs.datanode.data.dir':'/hadoop/hdfs/data', - 'dfs.journalnode.edits.dir':'/hadoop/hdfs/journalnode', - 'dfs.nameservices':'cl', - 'dfs.ha.namenodes.cl':'nn1,nn2', - 'dfs.namenode.rpc-address.cl.nn1':localips[0]+':8020', - 'dfs.namenode.rpc-address.cl.nn2':localips[1]+':8020', - 'dfs.namenode.http-address.cl.nn1':localips[0]+':50070', - 'dfs.namenode.http-address.cl.nn2':localips[1]+':50070', - 'dfs.namenode.shared.edits.dir':'qjournal://'+localips[0]+':8485;'+localips[1]+':8485;'+localips[2]+':8485/cl', - 'dfs.journalnode.edits.dir':'/hadoop/hdfs/journalnode', - 'dfs.client.failover.proxy.provider.cl':'org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider', - 'dfs.ha.fencing.methods':'sshfence(hdfs),shell(/bin/true)', - 'dfs.ha.fencing.ssh.private-key-files':'/var/lib/hadoop-hdfs/.ssh/id_rsa', - 'dfs.ha.fencing.ssh.connect-timeout':'30000', - 'dfs.ha.automatic-failover.enabled':'true', - 'ha.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181' - }) -pxc('/etc/hadoop/conf/yarn-site.xml', { - 'yarn.nodemanager.vmem-check-enabled':'false', - 
'yarn.application.classpath':'/etc/hadoop/conf,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*', - 'yarn.nodemanager.delete.debug-delay-sec':'43200', - 'yarn.scheduler.minimum-allocation-mb':'512', - 'yarn.scheduler.maximum-allocation-mb':'8192', - 'yarn.nodemanager.local-dirs':'/hadoop/yarn/local', - 'yarn.nodemanager.log-dirs':'/hadoop/yarn/log', - 'yarn.resourcemanager.zk-address':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181', - 'yarn.resourcemanager.ha.enabled':'true', - 'yarn.resourcemanager.ha.rm-ids':'rm1,rm2', - 'yarn.resourcemanager.hostname.rm1':localips[1], - 'yarn.resourcemanager.hostname.rm2':localips[2], - 'yarn.resourcemanager.cluster-id':'cl', - 'yarn.resourcemanager.recovery-enabled':'true', - 'yarn.resourcemanager.store.class':'org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore', - 'yarn.resourcemanager.nodes.exclude-path':'/etc/hadoop/conf/yarn.exclude' - }) -pxc('/etc/hadoop/conf/mapred-site.xml', { - 'mapreduce.application.classpath':'/etc/hadoop/conf,/usr/lib/hadoop/lib/*,/usr/lib/hadoop/*,/usr/hdp/current/hadoop-hdfs-namenode/,/usr/hdp/current/hadoop-hdfs-namenode/lib/*,/usr/hdp/current/hadoop-hdfs-namenode/*,/usr/hdp/current/hadoop-yarn-nodemanager/lib/*,/usr/hdp/current/hadoop-yarn-nodemanager/*,/usr/hdp/current/hadoop-mapreduce-historyserver/lib/*,/usr/hdp/current/hadoop-mapreduce-historyserver/*', - 'mapreduce.jobhistory.intermediate-done-dir':'/mr-history/tmp', - 'mapreduce.jobhistory.done-dir':'/mr-history/done', - 'mapreduce.jobhistory.address':localips[1], - 'mapreduce.jobhistory.webapp.address':localips[1] - }) -pxc('/etc/hbase/conf/hbase-site.xml', { - 'hbase.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181', - 'hbase.rootdir':'hdfs://cl/apps/hbase/data', - 'hbase.cluster.distributed':'true' - }) -pxc('/etc/hive/conf/hive-site.xml', { - 'fs.file.impl.disable.cache':'true', - 'fs.hdfs.impl.disable.cache':'true', - 'hadoop.clientside.fs.operations':'true', - 'hive.auto.convert.join.noconditionaltask.size':'1000000000', - 'hive.auto.convert.sortmerge.join.noconditionaltask':'true', - 'hive.auto.convert.sortmerge.join':'true', - 'hive.enforce.bucketing':'true', - 'hive.enforce.sorting':'true', - 'hive.mapjoin.bucket.cache.size':'10000', - 'hive.mapred.reduce.tasks.speculative.execution':'false', - 'hive.metastore.cache.pinobjtypes':'Table,Database,Type,FieldSchema,Order', - 'hive.metastore.client.socket.timeout':'60s', - 'hive.metastore.local':'true', - 'hive.metastore.uris':'thrift://' + fqdns[2] + ':9083', - 'hive.metastore.warehouse.dir':'/apps/hive/warehouse', - 'hive.optimize.bucketmapjoin.sortedmerge':'true', - 'hive.optimize.bucketmapjoin':'true', - 'hive.optimize.mapjoin.mapreduce':'true', - 'hive.optimize.reducededuplication.min.reducer':'1', - 'hive.security.authorization.manager':'org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider', - 'hive.semantic.analyzer.factory.impl':'org.apache.hivealog.cli.HCatSemanticAnalyzerFactory', - 'javax.jdo.option.ConnectionDriverName':'com.mysql.jdbc.Driver', - 'javax.jdo.option.ConnectionPassword': mysqlpw, - 'javax.jdo.option.ConnectionURL':'jdbc:mysql://localhost:3306/metastore?createDatabaseIfNotExist=true', - 'javax.jdo.option.ConnectionUserName':'root' - }) -if myid == 2: - pxc('/etc/cdap/conf/cdap-site.xml', { - 
'zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181/\${root.namespace}', - 'router.server.address':localips[2], - 'explore.enabled':'true', - 'enable.unrecoverable.reset':'true', - 'kafka.seed.brokers':localips[2] + ':9092', - 'app.program.jvm.opts':'-XX:MaxPermSize=128M \${twill.jvm.gc.opts} -Dhdp.version=$HDPVER -Dspark.yarn.am.extraJavaOptions=-Dhdp.version=$HDPVER' - }) -with open('/etc/hbase/conf/regionservers', 'w') as f: - for ip in localips: - f.write('{ip}\n'.format(ip=ip)) -with open('/etc/hbase/conf/hbase-env.sh', 'a') as f: - f.write("export HBASE_MANAGES_ZK=false\n") -with open('/etc/zookeeper/conf/zoo.cfg', 'a') as f: - f.write("server.1={L1}:2888:3888\nserver.2={L2}:2888:3888\nserver.3={L3}:2888:3888\nmaxClientCnxns=0\nautopurge.purgeInterval=6\n".format(L1=localips[0],L2=localips[1],L3=localips[2])) -with open('/etc/clustermembers', 'w') as f: - f.write("export me={me}\n".format(me=myid)) - for idx in range(len(localips)): - f.write("export n{i}={ip}\n".format(i=idx, ip=localips[idx])) - f.write("export N{i}={ip}\n".format(i=idx, ip=floatingips[idx])) -with open('/etc/hadoop/conf/slaves', 'w') as f: - for idx in range(len(localips)): - if idx != myid: - f.write("{x}\n".format(x=localips[idx])) -if myid < 3: - with open('/var/lib/zookeeper/myid', 'w') as f: - f.write("{id}".format(id=(myid + 1))) - os.system('service zookeeper-server restart') -for ip in localips: - os.system("su - hdfs -c \"ssh -o StrictHostKeyChecking=no -o NumberOfPasswordPrompts=0 {ip} echo Connectivity to {ip} verified\"".format(ip=ip)) -!EOF - -python /tmp/init.py - -. /etc/clustermembers -waitfor() { - while ( ! nc $1 $2 >/var/log/hive/hive.out 2>>/var/log/hive/hive.log /tmp/cinst.sh - wget -qO- $CODE_SOURCE/${CODE_VERSION}/instconsulagentub16.sh >/tmp/cinst.sh - bash /tmp/cinst.sh <>/etc/clustermembers -fi diff --git a/scripts/cloud_init/cdap-init.sh b/scripts/cloud_init/cdap-init.sh new file mode 100644 index 0000000..d3c1b08 --- /dev/null +++ b/scripts/cloud_init/cdap-init.sh @@ -0,0 +1,392 @@ +# ============LICENSE_START==================================================== +# org.onap.dcae +# ============================================================================= +# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved. +# ============================================================================= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============LICENSE_END====================================================== + +set -x +# +# get configuration +# +CODE_SOURCE=$1 +CODE_VERSION=$2 +CLUSTER_INDEX=$3 +CLUSTER_SIZE=$4 +CLUSTER_FQDNS=$5 +CLUSTER_LOCAL_IPS=$6 +CLUSTER_FLOATING_IPS=$7 +DATACENTER=$8 +REGISTERED_NAME=$9 +export JAVA_HOME=/usr/lib/jvm/default-java +md5sum /root/.sshkey/id_rsa | awk '{ print $1 }' >/root/.mysqlpw +chmod 400 /root/.mysqlpw +# +# enable outside apt repositories +# +wget -qO- http://public-repo-1.hortonworks.com/HDP/ubuntu16/2.x/updates/2.6.0.3/hdp.list >/etc/apt/sources.list.d/hdp.list +wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/cask.list >/etc/apt/sources.list.d/cask.list +wget -qO- http://repository.cask.co/ubuntu/precise/amd64/cdap/4.1/pubkey.gpg | apt-key add - +apt-key adv --recv-keys --keyserver keyserver.ubuntu.com B9733A7A07513CAD +apt-get update +# +# install software from apt repositories +# +apt-get install -y default-jdk hadoop-hdfs hadoop-mapreduce hive hbase libsnappy-dev liblzo2-dev hadooplzo spark-master spark-python zip unzip +usermod -a -G hadoop hive +if [ $CLUSTER_INDEX -lt 3 ] +then + apt-get install -y zookeeper-server + cat <>/etc/zookeeper/conf/zookeeper-env.sh +export JAVA_HOME=/usr/lib/jvm/default-java +export ZOOCFGDIR=/etc/zookeeper/conf +export ZOO_LOG_DIR=/var/log/zookeeper +export ZOOPIDFILE=/var/run/zookeeper/zookeeper_server.pid +!EOF + mkdir -p /var/lib/zookeeper + chown zookeeper:zookeeper /var/lib/zookeeper + cp /usr/hdp/current/zookeeper-server/etc/init.d/zookeeper-server /etc/init.d/. + update-rc.d zookeeper-server defaults + service zookeeper-server start +fi +if [ $CLUSTER_INDEX -eq 2 ] +then + debconf-set-selections </usr/hdp/current/spark-client/conf/java-opts +echo "export OPTS=\"\${OPTS} -Dhdp.version=$HDPVER\"" >>/etc/cdap/conf/cdap-env.sh +cat >/etc/profile.d/hadoop.sh <<'!EOF' +HADOOP_PREFIX=/usr/hdp/current/hadoop-client +HADOOP_YARN_HOME=/usr/hdp/current/hadoop-yarn-nodemanager +HADOOP_HOME=/usr/hdp/current/hadoop-client +HADOOP_COMMON_HOME=$HADOOP_HOME +HADOOP_CONF_DIR=/etc/hadoop/conf +HADOOP_HDFS_HOME=/usr/hdp/current/hadoop-hdfs-namenode +HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec +YARN_LOG_DIR=/usr/lib/hadoop-yarn/logs +HADOOP_LOG_DIR=/usr/lib/hadoop/logs +JAVA_HOME=/usr/lib/jvm/default-java +JAVA=$JAVA_HOME/bin/java +PATH=$PATH:$HADOOP_HOME/bin +HBASE_LOG_DIR=/usr/lib/hbase/logs +HADOOP_MAPRED_LOG_DIR=/usr/lib/hadoop-mapreduce/logs +HBASE_CONF_DIR=/etc/hbase/conf +export HADOOP_PREFIX HADOOP_HOME HADOOP_COMMON_HOME HADOOP_CONF_DIR HADOOP_HDFS_HOME JAVA_HOME PATH HADOOP_LIBEXEC_DIR JAVA JARN_LOG_DIR HADOOP_LOG_DIR HBASE_LOG_DIR HADOOP_MAPRED_LOG_DIR HBASE_CONF_DIR +!EOF +chmod 755 /etc/profile.d/hadoop.sh +cat >/etc/hadoop/conf/hadoop-env.sh +mv /root/.sshkey /var/lib/hadoop-hdfs/.ssh +cp /var/lib/hadoop-hdfs/.ssh/id_rsa.pub /var/lib/hadoop-hdfs/.ssh/authorized_keys +>/etc/hadoop/conf/dfs.exclude +>/etc/hadoop/conf/yarn.exclude +chown -R hdfs:hadoop /var/lib/hadoop-hdfs/.ssh /hadoop /usr/lib/hadoop +chown -R yarn:hadoop /usr/lib/hadoop-yarn /hadoop/yarn +chown -R mapred:hadoop /usr/lib/hadoop-mapreduce +chown -R hbase:hbase /usr/lib/hbase +chmod 700 /var/lib/hadoop-hdfs/.ssh +chmod 600 /var/lib/hadoop-hdfs/.ssh/* +sed -i -e '/maxClientCnxns/d' /etc/zookeeper/conf/zoo.cfg + +cat >/tmp/init.py <\n\n" + for n in m.keys(): + a = a + "\n \n {n}\n {v}\n ".format(n=n,v=m[n]) + a = a + "\n\n" + with open(f, 'w') as xml: + xml.write(a) +pxc('/etc/hadoop/conf/core-site.xml', { + 'fs.defaultFS':'hdfs://cl' + }) 
+pxc('/etc/hadoop/conf/hdfs-site.xml', { + 'dfs.namenode.datanode.registration.ip-hostname-check':'false', + 'dfs.namenode.name.dir':'/hadoop/hdfs/namenode', + 'dfs.hosts.exclude':'/etc/hadoop/conf/dfs.exclude', + 'dfs.datanode.data.dir':'/hadoop/hdfs/data', + 'dfs.journalnode.edits.dir':'/hadoop/hdfs/journalnode', + 'dfs.nameservices':'cl', + 'dfs.ha.namenodes.cl':'nn1,nn2', + 'dfs.namenode.rpc-address.cl.nn1':localips[0]+':8020', + 'dfs.namenode.rpc-address.cl.nn2':localips[1]+':8020', + 'dfs.namenode.http-address.cl.nn1':localips[0]+':50070', + 'dfs.namenode.http-address.cl.nn2':localips[1]+':50070', + 'dfs.namenode.shared.edits.dir':'qjournal://'+localips[0]+':8485;'+localips[1]+':8485;'+localips[2]+':8485/cl', + 'dfs.journalnode.edits.dir':'/hadoop/hdfs/journalnode', + 'dfs.client.failover.proxy.provider.cl':'org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider', + 'dfs.ha.fencing.methods':'sshfence(hdfs),shell(/bin/true)', + 'dfs.ha.fencing.ssh.private-key-files':'/var/lib/hadoop-hdfs/.ssh/id_rsa', + 'dfs.ha.fencing.ssh.connect-timeout':'30000', + 'dfs.ha.automatic-failover.enabled':'true', + 'ha.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181' + }) +pxc('/etc/hadoop/conf/yarn-site.xml', { + 'yarn.nodemanager.vmem-check-enabled':'false', + 'yarn.application.classpath':'/etc/hadoop/conf,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*', + 'yarn.nodemanager.delete.debug-delay-sec':'43200', + 'yarn.scheduler.minimum-allocation-mb':'512', + 'yarn.scheduler.maximum-allocation-mb':'8192', + 'yarn.nodemanager.local-dirs':'/hadoop/yarn/local', + 'yarn.nodemanager.log-dirs':'/hadoop/yarn/log', + 'yarn.resourcemanager.zk-address':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181', + 'yarn.resourcemanager.ha.enabled':'true', + 'yarn.resourcemanager.ha.rm-ids':'rm1,rm2', + 'yarn.resourcemanager.hostname.rm1':localips[1], + 'yarn.resourcemanager.hostname.rm2':localips[2], + 'yarn.resourcemanager.cluster-id':'cl', + 'yarn.resourcemanager.recovery-enabled':'true', + 'yarn.resourcemanager.store.class':'org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore', + 'yarn.resourcemanager.nodes.exclude-path':'/etc/hadoop/conf/yarn.exclude' + }) +pxc('/etc/hadoop/conf/mapred-site.xml', { + 'mapreduce.application.classpath':'/etc/hadoop/conf,/usr/lib/hadoop/lib/*,/usr/lib/hadoop/*,/usr/hdp/current/hadoop-hdfs-namenode/,/usr/hdp/current/hadoop-hdfs-namenode/lib/*,/usr/hdp/current/hadoop-hdfs-namenode/*,/usr/hdp/current/hadoop-yarn-nodemanager/lib/*,/usr/hdp/current/hadoop-yarn-nodemanager/*,/usr/hdp/current/hadoop-mapreduce-historyserver/lib/*,/usr/hdp/current/hadoop-mapreduce-historyserver/*', + 'mapreduce.jobhistory.intermediate-done-dir':'/mr-history/tmp', + 'mapreduce.jobhistory.done-dir':'/mr-history/done', + 'mapreduce.jobhistory.address':localips[1], + 'mapreduce.jobhistory.webapp.address':localips[1] + }) +pxc('/etc/hbase/conf/hbase-site.xml', { + 'hbase.zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181', + 'hbase.rootdir':'hdfs://cl/apps/hbase/data', + 'hbase.cluster.distributed':'true' + }) +pxc('/etc/hive/conf/hive-site.xml', { + 'fs.file.impl.disable.cache':'true', + 'fs.hdfs.impl.disable.cache':'true', + 'hadoop.clientside.fs.operations':'true', + 'hive.auto.convert.join.noconditionaltask.size':'1000000000', + 
'hive.auto.convert.sortmerge.join.noconditionaltask':'true', + 'hive.auto.convert.sortmerge.join':'true', + 'hive.enforce.bucketing':'true', + 'hive.enforce.sorting':'true', + 'hive.mapjoin.bucket.cache.size':'10000', + 'hive.mapred.reduce.tasks.speculative.execution':'false', + 'hive.metastore.cache.pinobjtypes':'Table,Database,Type,FieldSchema,Order', + 'hive.metastore.client.socket.timeout':'60s', + 'hive.metastore.local':'true', + 'hive.metastore.uris':'thrift://' + fqdns[2] + ':9083', + 'hive.metastore.warehouse.dir':'/apps/hive/warehouse', + 'hive.optimize.bucketmapjoin.sortedmerge':'true', + 'hive.optimize.bucketmapjoin':'true', + 'hive.optimize.mapjoin.mapreduce':'true', + 'hive.optimize.reducededuplication.min.reducer':'1', + 'hive.security.authorization.manager':'org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider', + 'hive.semantic.analyzer.factory.impl':'org.apache.hivealog.cli.HCatSemanticAnalyzerFactory', + 'javax.jdo.option.ConnectionDriverName':'com.mysql.jdbc.Driver', + 'javax.jdo.option.ConnectionPassword': mysqlpw, + 'javax.jdo.option.ConnectionURL':'jdbc:mysql://localhost:3306/metastore?createDatabaseIfNotExist=true', + 'javax.jdo.option.ConnectionUserName':'root' + }) +if myid == 2: + pxc('/etc/cdap/conf/cdap-site.xml', { + 'zookeeper.quorum':localips[0]+':2181,'+localips[1]+':2181,'+localips[2]+':2181/\${root.namespace}', + 'router.server.address':localips[2], + 'explore.enabled':'true', + 'enable.unrecoverable.reset':'true', + 'kafka.seed.brokers':localips[2] + ':9092', + 'app.program.jvm.opts':'-XX:MaxPermSize=128M \${twill.jvm.gc.opts} -Dhdp.version=$HDPVER -Dspark.yarn.am.extraJavaOptions=-Dhdp.version=$HDPVER' + }) +with open('/etc/hbase/conf/regionservers', 'w') as f: + for ip in localips: + f.write('{ip}\n'.format(ip=ip)) +with open('/etc/hbase/conf/hbase-env.sh', 'a') as f: + f.write("export HBASE_MANAGES_ZK=false\n") +with open('/etc/zookeeper/conf/zoo.cfg', 'a') as f: + f.write("server.1={L1}:2888:3888\nserver.2={L2}:2888:3888\nserver.3={L3}:2888:3888\nmaxClientCnxns=0\nautopurge.purgeInterval=6\n".format(L1=localips[0],L2=localips[1],L3=localips[2])) +with open('/etc/clustermembers', 'w') as f: + f.write("export me={me}\n".format(me=myid)) + for idx in range(len(localips)): + f.write("export n{i}={ip}\n".format(i=idx, ip=localips[idx])) + f.write("export N{i}={ip}\n".format(i=idx, ip=floatingips[idx])) +with open('/etc/hadoop/conf/slaves', 'w') as f: + for idx in range(len(localips)): + if idx != myid: + f.write("{x}\n".format(x=localips[idx])) +if myid < 3: + with open('/var/lib/zookeeper/myid', 'w') as f: + f.write("{id}".format(id=(myid + 1))) + os.system('service zookeeper-server restart') +for ip in localips: + os.system("su - hdfs -c \"ssh -o StrictHostKeyChecking=no -o NumberOfPasswordPrompts=0 {ip} echo Connectivity to {ip} verified\"".format(ip=ip)) +!EOF + +python /tmp/init.py + +. /etc/clustermembers +waitfor() { + while ( ! 
nc $1 $2 >/var/log/hive/hive.out 2>>/var/log/hive/hive.log /tmp/cinst.sh + wget -qO- $CODE_SOURCE/${CODE_VERSION}/instconsulagentub16.sh >/tmp/cinst.sh + bash /tmp/cinst.sh <>/etc/clustermembers +fi diff --git a/scripts/cloud_init/instconsulagentub16.sh b/scripts/cloud_init/instconsulagentub16.sh new file mode 100644 index 0000000..3b84b30 --- /dev/null +++ b/scripts/cloud_init/instconsulagentub16.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# ============LICENSE_START==================================================== +# org.onap.dcae +# ============================================================================= +# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved. +# ============================================================================= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END====================================================== + +CONSULVER=consul_0.8.3 +CONSULNAME=${CONSULVER}_linux_amd64 +CB=/opt/consul/bin +CD=/opt/consul/data +CF=/opt/consul/config +mkdir -p $CB $CD $CF +cat >$CF/consul.json +cd $CB +wget https://releases.hashicorp.com/consul/${CONSULVER}/${CONSULNAME}.zip +unzip ${CONSULNAME}.zip +rm ${CONSULNAME}.zip +mv consul ${CONSULNAME} +ln -s ${CONSULNAME} consul +cat < /lib/systemd/system/consul.service +[Unit] +Description=Consul +Requires=network-online.target +After=network.target +[Service] +Type=simple +ExecStart=/opt/consul/bin/consul agent -config-dir=/opt/consul/config +ExecReload=/bin/kill -HUP \$MAINPID +[Install] +WantedBy=multi-user.target +EOF +systemctl enable consul +systemctl start consul +until /opt/consul/bin/consul join "dcae-cnsl" +do + echo Waiting to join Consul cluster + sleep 60 +done diff --git a/scripts/instconsulagentub16.sh b/scripts/instconsulagentub16.sh deleted file mode 100644 index 3b84b30..0000000 --- a/scripts/instconsulagentub16.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash -# ============LICENSE_START==================================================== -# org.onap.dcae -# ============================================================================= -# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved. -# ============================================================================= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============LICENSE_END======================================================
-
-CONSULVER=consul_0.8.3
-CONSULNAME=${CONSULVER}_linux_amd64
-CB=/opt/consul/bin
-CD=/opt/consul/data
-CF=/opt/consul/config
-mkdir -p $CB $CD $CF
-cat >$CF/consul.json
-cd $CB
-wget https://releases.hashicorp.com/consul/${CONSULVER}/${CONSULNAME}.zip
-unzip ${CONSULNAME}.zip
-rm ${CONSULNAME}.zip
-mv consul ${CONSULNAME}
-ln -s ${CONSULNAME} consul
-cat < /lib/systemd/system/consul.service
-[Unit]
-Description=Consul
-Requires=network-online.target
-After=network.target
-[Service]
-Type=simple
-ExecStart=/opt/consul/bin/consul agent -config-dir=/opt/consul/config
-ExecReload=/bin/kill -HUP \$MAINPID
-[Install]
-WantedBy=multi-user.target
-EOF
-systemctl enable consul
-systemctl start consul
-until /opt/consul/bin/consul join "dcae-cnsl"
-do
-  echo Waiting to join Consul cluster
-  sleep 60
-done
-- 
cgit 1.2.3-korg