Diffstat (limited to 'TOSCA/kubernetes-cluster-TOSCA/scripts')
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/create.py                        93
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/configure.py  175
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/start.py      153
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_node/configure.py     88
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/nfs.sh                           29
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/tasks.py                         43
6 files changed, 581 insertions(+), 0 deletions(-)
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/create.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/create.py
new file mode 100644
index 0000000000..4bb3710f06
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/create.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This task is triggered after the VM is created. It checks whether Docker is up and running and whether cloud-init has finished.
+
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import OperationRetry
+
+
+def check_command(command):
+
+    try:
+        process = subprocess.Popen(
+            command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
+        )
+    except OSError:
+        return False
+
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(command))
+        return False
+
+    return True
+
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(_command))
+        return False
+
+    return output
+
+
+if __name__ == '__main__':
+
+    # Check that `docker ps` works.
+    docker = check_command('docker ps')
+    if not docker:
+        raise OperationRetry(
+            'Docker is not present on the system.')
+    ctx.logger.info('Docker is present on the system.')
+
+    # Next, check whether cloud-init is still running.
+    finished = False
+    ps = execute_command('ps -ef')
+    for line in ps.split('\n'):
+        if '/usr/bin/python /usr/bin/cloud-init modules' in line:
+            raise OperationRetry(
+                'You provided a Cloud-init Cloud Config to configure instances. '
+                'Waiting for Cloud-init to complete.')
+    ctx.logger.info('Cloud-init finished.')
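
For reference, the cloud-init check above amounts to scanning the process table for the cloud-init "modules" stage. Below is a minimal standalone sketch of that logic run against a made-up `ps -ef` snippet (the process lines are illustrative samples, and no Cloudify context is needed):

# Sketch only: detect a still-running cloud-init stage in sample ps output.
sample_ps = '\n'.join([
    'root      1001     1  0 10:00 ?  00:00:01 /usr/lib/systemd/systemd-journald',
    'root      1342     1  2 10:01 ?  00:00:05 /usr/bin/python /usr/bin/cloud-init modules --mode=final',
])

still_running = any(
    '/usr/bin/python /usr/bin/cloud-init modules' in line
    for line in sample_ps.split('\n'))
print('cloud-init still running: {0}'.format(still_running))  # True for this sample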
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/configure.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/configure.py
new file mode 100644
index 0000000000..7d5dffcc57
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/configure.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This script is executed on the Kubernetes master host. It initializes the master and installs a pod network.
+
+import pwd
+import grp
+import os
+import re
+import getpass
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import OperationRetry
+from cloudify_rest_client.exceptions import CloudifyClientError
+
+JOIN_COMMAND_REGEX = r'^kubeadm join[\sA-Za-z0-9\.\:\-\_]*'
+BOOTSTRAP_TOKEN_REGEX = r'[a-z0-9]{6}\.[a-z0-9]{16}'
+IP_PORT_REGEX = r'[0-9]+(?:\.[0-9]+){3}:[0-9]+'
+NOT_SHA_REGEX = r'^(?!.*sha256)'
+JCRE_COMPILED = re.compile(JOIN_COMMAND_REGEX)
+BTRE_COMPILED = re.compile(BOOTSTRAP_TOKEN_REGEX)
+IPRE_COMPILED = re.compile(IP_PORT_REGEX)
+SHA_COMPILED = re.compile(NOT_SHA_REGEX)
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(_command))
+        return False
+
+    return output
+
+
+def cleanup_and_retry():
+    reset_cluster_command = 'sudo kubeadm reset'
+    output = execute_command(reset_cluster_command)
+    ctx.logger.info('reset_cluster_command: {0} output: {1}'.format(reset_cluster_command, output))
+    raise OperationRetry('Restarting kubernetes because of a problem.')
+
+
+def configure_admin_conf():
+    # Add the kubeadm admin config to the agent user's environment.
+    agent_user = getpass.getuser()
+    uid = pwd.getpwnam(agent_user).pw_uid
+    gid = grp.getgrnam('docker').gr_gid
+    admin_file_dest = os.path.join(os.path.expanduser('~'), 'admin.conf')
+
+    execute_command('sudo cp {0} {1}'.format('/etc/kubernetes/admin.conf', admin_file_dest))
+    execute_command('sudo chown {0}:{1} {2}'.format(uid, gid, admin_file_dest))
+
+    with open(os.path.join(os.path.expanduser('~'), '.bashrc'), 'a') as outfile:
+        outfile.write('export KUBECONFIG=$HOME/admin.conf\n')
+    os.environ['KUBECONFIG'] = admin_file_dest
+
+
+def setup_secrets(_split_master_port, _bootstrap_token):
+    master_ip = _split_master_port[0]
+    master_port = _split_master_port[1]
+    ctx.instance.runtime_properties['master_ip'] = _split_master_port[0]
+    ctx.instance.runtime_properties['master_port'] = _split_master_port[1]
+    ctx.instance.runtime_properties['bootstrap_token'] = _bootstrap_token
+    from cloudify import manager
+    cfy_client = manager.get_rest_client()
+
+    _secret_key = 'kubernetes_master_ip'
+    if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+        cfy_client.secrets.create(key=_secret_key, value=master_ip)
+    else:
+        cfy_client.secrets.update(key=_secret_key, value=master_ip)
+    ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+
+    _secret_key = 'kubernetes_master_port'
+    if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+        cfy_client.secrets.create(key=_secret_key, value=master_port)
+    else:
+        cfy_client.secrets.update(key=_secret_key, value=master_port)
+    ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+
+    _secret_key = 'bootstrap_token'
+    if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+        cfy_client.secrets.create(key=_secret_key, value=_bootstrap_token)
+    else:
+        cfy_client.secrets.update(key=_secret_key, value=_bootstrap_token)
+    ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+
+
+if __name__ == '__main__':
+
+    ctx.instance.runtime_properties['KUBERNETES_MASTER'] = True
+    cniCommand1 = subprocess.Popen(["sudo", "sysctl", "net.bridge.bridge-nf-call-iptables=1"], stdout=subprocess.PIPE)
+    # Start the Kubernetes master.
+    ctx.logger.info('Attempting to start Kubernetes master.')
+    start_master_command = 'sudo kubeadm init'
+    start_output = execute_command(start_master_command)
+    ctx.logger.debug('start_master_command output: {0}'.format(start_output))
+    # Check whether the start succeeded.
+    if start_output is False or not isinstance(start_output, basestring):
+        ctx.logger.error('Kubernetes master failed to start.')
+        cleanup_and_retry()
+    ctx.logger.info('Kubernetes master started successfully.')
+
+    # Slice and dice the start_master_command output.
+    ctx.logger.info('Attempting to retrieve Kubernetes cluster information.')
+    split_start_output = \
+        [line.strip() for line in start_output.split('\n') if line.strip()]
+    del line
+
+    ctx.logger.debug(
+        'Kubernetes master start output, split and stripped: {0}'.format(
+            split_start_output))
+    split_join_command = ''
+    for li in split_start_output:
+        ctx.logger.debug('li in split_start_output: {0}'.format(li))
+        if re.match(JCRE_COMPILED, li):
+            split_join_command = re.split(r'\s', li)
+    del li
+    ctx.logger.info('split_join_command: {0}'.format(split_join_command))
+
+    if not split_join_command:
+        ctx.logger.error('No join command in split_start_output: {0}'.format(split_join_command))
+        cleanup_and_retry()
+
+    for li in split_join_command:
+        ctx.logger.info('Sorting bits and pieces: li: {0}'.format(li))
+        if re.match(BTRE_COMPILED, li) and re.match(SHA_COMPILED, li):
+            bootstrap_token = li
+        elif re.match(IPRE_COMPILED, li):
+            split_master_port = li.split(':')
+    setup_secrets(split_master_port, bootstrap_token)
+    configure_admin_conf()
+
+    weaveCommand1 = subprocess.Popen(["kubectl", "version"], stdout=subprocess.PIPE)
+    weaveCommand2 = subprocess.Popen(["base64"], stdin=weaveCommand1.stdout, stdout=subprocess.PIPE)
+    kubever = weaveCommand2.communicate()[0]
+    kubever = kubever.replace('\n', '').replace('\r', '')
+    ctx.logger.info("kubever :" + kubever)
+    weaveURL = 'https://cloud.weave.works/k8s/net?k8s-version={0}'.format(kubever)
+    ctx.logger.info("weaveURL:" + weaveURL)
+    weaveCommand4 = subprocess.Popen(["kubectl", "apply", "-f", weaveURL], stdout=subprocess.PIPE)
+    weaveResult = weaveCommand4.communicate()[0]
+    ctx.logger.info("weaveResult :" + weaveResult)
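
To make the join-line parsing above concrete, here is a minimal standalone sketch of what the four regexes extract from a typical `kubeadm join` line. The token and address values are made-up samples, not output from a real cluster:

# Sketch only: split a sample kubeadm join line into token and ip:port.
import re

JOIN_COMMAND_REGEX = r'^kubeadm join[\sA-Za-z0-9\.\:\-\_]*'
BOOTSTRAP_TOKEN_REGEX = r'[a-z0-9]{6}\.[a-z0-9]{16}'
IP_PORT_REGEX = r'[0-9]+(?:\.[0-9]+){3}:[0-9]+'
NOT_SHA_REGEX = r'^(?!.*sha256)'

sample = 'kubeadm join --token abcdef.0123456789abcdef 10.0.0.5:6443'
assert re.match(JOIN_COMMAND_REGEX, sample)

token, address = None, None
for word in re.split(r'\s', sample):
    if re.match(BOOTSTRAP_TOKEN_REGEX, word) and re.match(NOT_SHA_REGEX, word):
        token = word                   # 'abcdef.0123456789abcdef'
    elif re.match(IP_PORT_REGEX, word):
        address = word.split(':')      # ['10.0.0.5', '6443']

print('token: {0}, address: {1}'.format(token, address))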
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/start.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/start.py
new file mode 100644
index 0000000000..bbc166b134
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/start.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This script is executed on the master host. It checks whether kube-dns is running and stores the cluster credentials as Cloudify secrets.
+
+import os
+import subprocess
+import pip
+try:
+    import yaml
+except ImportError:
+    pip.main(['install', 'pyyaml'])
+    import yaml
+
+from cloudify import ctx
+from cloudify.exceptions import RecoverableError
+from cloudify import manager
+
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(_command))
+        return False
+
+    return output
+
+
+def check_kubedns_status(_get_pods):
+
+    ctx.logger.debug('get_pods: {0} '.format(_get_pods))
+
+    for pod_line in _get_pods.split('\n'):
+        ctx.logger.debug('pod_line: {0} '.format(pod_line))
+        try:
+            _namespace, _name, _ready, _status, _restarts, _age = pod_line.split()
+        except ValueError:
+            pass
+        else:
+            if 'kube-dns' in _name and 'Running' not in _status:
+                return False
+            elif 'kube-dns' in _name and 'Running' in _status:
+                return True
+    return False
+
+
+if __name__ == '__main__':
+
+    cfy_client = manager.get_rest_client()
+
+    # Checking if the Kubernetes DNS service is running (last step).
+    admin_file_dest = os.path.join(os.path.expanduser('~'), 'admin.conf')
+    os.environ['KUBECONFIG'] = admin_file_dest
+    get_pods = execute_command('kubectl get pods --all-namespaces')
+    if not check_kubedns_status(get_pods):
+        raise RecoverableError('kube-dns not Running')
+
+    # Storing the Kubernetes master configuration.
+    kubernetes_master_config = {}
+    with open(admin_file_dest, 'r') as outfile:
+        try:
+            kubernetes_master_config = yaml.load(outfile)
+        except yaml.YAMLError as e:
+            raise RecoverableError(
+                'Unable to read Kubernetes Admin file: {0}: {1}'.format(
+                    admin_file_dest, str(e)))
+    ctx.instance.runtime_properties['configuration_file_content'] = \
+        kubernetes_master_config
+
+    clusters = kubernetes_master_config.get('clusters')
+    _clusters = {}
+    for cluster in clusters:
+        __name = cluster.get('name')
+        _cluster = cluster.get('cluster', {})
+        _secret_key = '%s_certificate_authority_data' % __name
+        if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+            cfy_client.secrets.create(key=_secret_key, value=_cluster.get('certificate-authority-data'))
+            ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+        else:
+            cfy_client.secrets.update(key=_secret_key, value=_cluster.get('certificate-authority-data'))
+        ctx.instance.runtime_properties['%s_certificate_authority_data' % __name] = _cluster.get('certificate-authority-data')
+        _clusters[__name] = _cluster
+    del __name
+
+    contexts = kubernetes_master_config.get('contexts')
+    _contexts = {}
+    for context in contexts:
+        __name = context.get('name')
+        _context = context.get('context', {})
+        _contexts[__name] = _context
+    del __name
+
+    users = kubernetes_master_config.get('users')
+    _users = {}
+    for user in users:
+        __name = user.get('name')
+        _user = user.get('user', {})
+        _secret_key = '%s_client_certificate_data' % __name
+        if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+            cfy_client.secrets.create(key=_secret_key, value=_user.get('client-certificate-data'))
+            ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+        else:
+            cfy_client.secrets.update(key=_secret_key, value=_user.get('client-certificate-data'))
+        _secret_key = '%s_client_key_data' % __name
+        if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+            cfy_client.secrets.create(key=_secret_key, value=_user.get('client-key-data'))
+            ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+        else:
+            cfy_client.secrets.update(key=_secret_key, value=_user.get('client-key-data'))
+        ctx.instance.runtime_properties['%s_client_certificate_data' % __name] = _user.get('client-certificate-data')
+        ctx.instance.runtime_properties['%s_client_key_data' % __name] = _user.get('client-key-data')
+        _users[__name] = _user
+    del __name
+
+    ctx.instance.runtime_properties['kubernetes'] = {
+        'clusters': _clusters,
+        'contexts': _contexts,
+        'users': _users
+    }
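
As an illustration of what start.py walks, the admin.conf kubeconfig has three top-level lists (clusters, contexts, users), and the script derives one or two Cloudify secret names from each entry. The structure below uses placeholder names and values only:

# Sketch only: placeholder kubeconfig structure and the secret names derived from it.
sample_admin_conf = {
    'clusters': [
        {'name': 'kubernetes',
         'cluster': {'certificate-authority-data': '<base64 CA cert>',
                     'server': 'https://10.0.0.5:6443'}},
    ],
    'contexts': [
        {'name': 'kubernetes-admin@kubernetes',
         'context': {'cluster': 'kubernetes', 'user': 'kubernetes-admin'}},
    ],
    'users': [
        {'name': 'kubernetes-admin',
         'user': {'client-certificate-data': '<base64 client cert>',
                  'client-key-data': '<base64 client key>'}},
    ],
}

for cluster in sample_admin_conf['clusters']:
    print('%s_certificate_authority_data' % cluster['name'])
for user in sample_admin_conf['users']:
    print('%s_client_certificate_data' % user['name'])
    print('%s_client_key_data' % user['name'])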
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_node/configure.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_node/configure.py
new file mode 100644
index 0000000000..69faaa80d1
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_node/configure.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# After the K8s master is up and running, this script is triggered on each worker node. It joins the node to the cluster and mounts the NFS directory.
+
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+START_COMMAND = 'sudo kubeadm join --token {0} {1}:{2}'
+
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(_command))
+        return False
+
+    return output
+
+
+if __name__ == '__main__':
+
+    hostname = execute_command('hostname')
+    ctx.instance.runtime_properties['hostname'] = hostname.rstrip('\n')
+
+    # Get the master cluster info.
+    masters = \
+        [x for x in ctx.instance.relationships if
+         x.target.instance.runtime_properties.get(
+             'KUBERNETES_MASTER', False)]
+    if len(masters) != 1:
+        raise NonRecoverableError(
+            'Currently, a Kubernetes node must have a '
+            'dependency on one Kubernetes master.')
+    master = masters[0]
+    bootstrap_token = \
+        master.target.instance.runtime_properties['bootstrap_token']
+    master_ip = \
+        master.target.instance.runtime_properties['master_ip']
+    master_port = \
+        master.target.instance.runtime_properties['master_port']
+
+    # Join the cluster.
+    cniCommand1 = subprocess.Popen(["sudo", "sysctl", "net.bridge.bridge-nf-call-iptables=1"], stdout=subprocess.PIPE)
+    join_command = \
+        'sudo kubeadm join --token {0} {1}:{2}'.format(
+            bootstrap_token, master_ip, master_port)
+    execute_command(join_command)
+
+    # Mount the NFS share exported by the master.
+    mount_command = \
+        'sudo mount -t nfs -o proto=tcp,port=2049 {0}:/dockerdata-nfs /dockerdata-nfs'.format(master_ip)
+    execute_command(mount_command)
\ No newline at end of file
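
With made-up sample values standing in for the master's runtime properties, the worker-node script above ends up running commands equivalent to the ones built in this sketch:

# Sketch only: the join and mount command strings built from sample values.
bootstrap_token = 'abcdef.0123456789abcdef'   # sample value
master_ip, master_port = '10.0.0.5', '6443'   # sample values

join_command = 'sudo kubeadm join --token {0} {1}:{2}'.format(
    bootstrap_token, master_ip, master_port)
mount_command = ('sudo mount -t nfs -o proto=tcp,port=2049 '
                 '{0}:/dockerdata-nfs /dockerdata-nfs'.format(master_ip))
print(join_command)
print(mount_command)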
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/nfs.sh b/TOSCA/kubernetes-cluster-TOSCA/scripts/nfs.sh
new file mode 100644
index 0000000000..2d59acd99d
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/nfs.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This script sets up the NFS server on the k8s master.
+
+mkdir -p /dockerdata-nfs
+chmod 777 /dockerdata-nfs
+yum -y install nfs-utils
+systemctl enable nfs-server.service
+systemctl start nfs-server.service
+echo "/dockerdata-nfs *(rw,no_root_squash,no_subtree_check)" |sudo tee --append /etc/exports
+echo "/home/centos/dockerdata-nfs /dockerdata-nfs none bind 0 0" |sudo tee --append /etc/fstab
+exportfs -a
\ No newline at end of file
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/tasks.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/tasks.py
new file mode 100644
index 0000000000..7680fac957
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/tasks.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# Fabric tasks used to manage Kubernetes nodes (labeling, draining, deletion).
+
+from fabric.api import run
+
+
+def label_node(labels, hostname):
+    if labels:
+        label_list = []
+        for key, value in labels.items():
+            label_pair_string = '%s=%s' % (key, value)
+            label_list.append(label_pair_string)
+        label_string = ' '.join(label_list)
+        command = 'kubectl label nodes %s %s' % (hostname, label_string)
+        run(command)
+
+
+def stop_node(hostname):
+    command = 'kubectl drain %s' % (hostname)
+    run(command)
+
+
+def delete_node(hostname):
+    command = 'kubectl delete no %s' % (hostname)
+    run(command)
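
For a sense of what label_node() sends over Fabric, here is a small usage sketch; the labels and hostname are illustrative values, not taken from the blueprint:

# Sketch only: the kubectl command label_node() builds before run().
labels = {'disktype': 'ssd', 'role': 'worker'}
hostname = 'k8s-node-1'

label_string = ' '.join('%s=%s' % (k, v) for k, v in labels.items())
print('kubectl label nodes %s %s' % (hostname, label_string))
# -> kubectl label nodes k8s-node-1 disktype=ssd role=worker  (label order may vary)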