summaryrefslogtreecommitdiffstats
path: root/helm/plugin
diff options
context:
space:
mode:
authorjh245g <jh245g@att.com>2018-06-27 14:50:33 -0400
committerJun (Nicolas) Hu <jh245g@att.com>2018-06-27 19:01:20 +0000
commitd9b88cc5ee987f5fed1011583a172f3c76251814 (patch)
tree36668f1ced1b3de0f13a5d6b8a90c495e032c209 /helm/plugin
parentcec8e7c0f90be290796d9a666edeea97e118a26f (diff)
Update helm plugin source code
Change-Id: I1689d8d915c8f18a3e8230dcabb33413a2b9043e Issue-ID: CCSDK-322 Signed-off-by: jh245g <jh245g@att.com>
Diffstat (limited to 'helm/plugin')
-rw-r--r--helm/plugin/__init__.py14
-rw-r--r--helm/plugin/tasks.py305
-rw-r--r--helm/plugin/tests/__init__.py14
-rw-r--r--helm/plugin/tests/blueprint/blueprint.yaml42
-rw-r--r--helm/plugin/tests/blueprint/plugin/test_plugin.yaml20
-rw-r--r--helm/plugin/tests/test_plugin.py47
-rw-r--r--helm/plugin/workflows.py64
7 files changed, 506 insertions, 0 deletions
diff --git a/helm/plugin/__init__.py b/helm/plugin/__init__.py
new file mode 100644
index 0000000..749f68f
--- /dev/null
+++ b/helm/plugin/__init__.py
@@ -0,0 +1,14 @@
+########
+# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
diff --git a/helm/plugin/tasks.py b/helm/plugin/tasks.py
new file mode 100644
index 0000000..8df29ac
--- /dev/null
+++ b/helm/plugin/tasks.py
@@ -0,0 +1,305 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright (c) 2018 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+from cloudify.decorators import operation
+import shutil
+import errno
+import sys
+import pwd
+import grp
+import os
+import re
+import getpass
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import OperationRetry
+from cloudify_rest_client.exceptions import CloudifyClientError
+import pip
+import json
+import yaml
+import urllib2
+from cloudify.decorators import operation
+from cloudify import exceptions
+from cloudify.exceptions import NonRecoverableError
+
+
+
def execute_command(_command):
    """Run *_command* in a subprocess and return its stdout, or False on error."""
    ctx.logger.debug('_command {0}.'.format(_command))

    subprocess_args = {
        'args': _command.split(),
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE
    }

    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))

    child = subprocess.Popen(**subprocess_args)
    stdout_data, stderr_data = child.communicate()

    ctx.logger.debug('command: {0} '.format(_command))
    ctx.logger.debug('output: {0} '.format(stdout_data))
    ctx.logger.debug('error: {0} '.format(stderr_data))
    ctx.logger.debug('process.returncode: {0} '.format(child.returncode))

    # Non-zero (or negative, i.e. signal-killed) return code means failure.
    if child.returncode != 0:
        ctx.logger.error('Running `{0}` returns error.'.format(_command))
        return False

    return stdout_data
+
+
def configure_admin_conf():
    """Copy the kubeadm admin.conf into $HOME and export it as KUBECONFIG."""
    agent_user = getpass.getuser()
    user_id = pwd.getpwnam(agent_user).pw_uid
    group_id = grp.getgrnam('docker').gr_gid
    home_dir = os.path.expanduser('~')
    admin_file_dest = os.path.join(home_dir, 'admin.conf')

    # sudo is required: /etc/kubernetes/admin.conf is root-owned.
    execute_command('sudo cp {0} {1}'.format('/etc/kubernetes/admin.conf',
                                             admin_file_dest))
    execute_command('sudo chown {0}:{1} {2}'.format(user_id, group_id,
                                                    admin_file_dest))

    # Persist for future shells, then apply to the current process.
    with open(os.path.join(home_dir, '.bashrc'), 'a') as bashrc:
        bashrc.write('export KUBECONFIG=$HOME/admin.conf')
    os.environ['KUBECONFIG'] = admin_file_dest
+
def get_current_helm_value(chart_name):
    """Fetch the release's computed helm values (`helm get values -a`) and
    store them in runtime_properties['current-helm-value']."""
    host = str(ctx.node.properties['tiller-server-ip']) + ':' + \
        str(ctx.node.properties['tiller-server-port'])
    config_dir = str(ctx.node.properties['config-dir']) + \
        str(ctx.deployment.id) + '/'

    command = ["helm", "get", "values", "-a", chart_name, '--host', host]
    if str_to_bool(ctx.node.properties['tls-enable']):
        # TLS client material was written to the deployment config dir.
        command += ['--tls',
                    '--tls-ca-cert', config_dir + 'ca.cert.pem',
                    '--tls-cert', config_dir + 'helm.cert.pem',
                    '--tls-key', config_dir + 'helm.key.pem']

    reader = subprocess.Popen(command, stdout=subprocess.PIPE)
    raw_values = reader.communicate()[0]
    ctx.instance.runtime_properties['current-helm-value'] = \
        yaml.safe_load(raw_values)
+
def get_helm_history(chart_name):
    """Capture `helm history` for the release into
    runtime_properties['helm-history'] as a list of cleaned lines."""
    host = str(ctx.node.properties['tiller-server-ip']) + ':' + \
        str(ctx.node.properties['tiller-server-port'])
    config_dir = str(ctx.node.properties['config-dir']) + \
        str(ctx.deployment.id) + '/'

    command = ["helm", "history", chart_name, '--host', host]
    if str_to_bool(ctx.node.properties['tls-enable']):
        command += ['--tls',
                    '--tls-ca-cert', config_dir + 'ca.cert.pem',
                    '--tls-cert', config_dir + 'helm.cert.pem',
                    '--tls-key', config_dir + 'helm.key.pem']

    reader = subprocess.Popen(command, stdout=subprocess.PIPE)
    history = reader.communicate()[0]
    # Keep non-empty lines, trimmed, with tabs normalized to spaces.
    history_lines = [line.strip().replace('\t', ' ')
                     for line in history.split('\n') if line.strip()]
    ctx.instance.runtime_properties['helm-history'] = history_lines
+
def mergedict(dict1, dict2):
    """Deep-merge *dict2* into *dict1* in place.

    Keys missing from dict1 are copied over; when both sides hold a dict
    the merge recurses; otherwise dict2's value overwrites dict1's.
    """
    for key, incoming in dict2.items():
        current = dict1.get(key)
        if type(current) == dict and type(incoming) == dict:
            mergedict(current, incoming)
        else:
            dict1[key] = incoming
+
def tls():
    """Return the helm --tls argument string, or '' when TLS is disabled."""
    if not str_to_bool(ctx.node.properties['tls-enable']):
        return ''
    config_dir = str(ctx.node.properties['config-dir']) + \
        str(ctx.deployment.id) + '/'
    tls_args = (' --tls --tls-ca-cert ' + config_dir + 'ca.cert.pem'
                ' --tls-cert ' + config_dir + 'helm.cert.pem'
                ' --tls-key ' + config_dir + 'helm.key.pem ')
    ctx.logger.debug(tls_args)
    return tls_args
+
def tiller_host():
    """Return the helm --host argument pointing at the configured Tiller."""
    host_arg = ' --host ' + str(ctx.node.properties['tiller-server-ip']) + \
        ':' + str(ctx.node.properties['tiller-server-port']) + ' '
    ctx.logger.debug(host_arg)
    return host_arg
+
+
def str_to_bool(s):
    """Convert a truthy/falsy string (or bool) to a real bool.

    Accepts 'True'/'true' and 'False'/'false'; any input is first coerced
    with str(), so passing an actual bool also works.

    Raises:
        ValueError: for any other value. (The original code did
        `raise False`, which itself blows up with a bare TypeError
        because False is not an exception.)
    """
    s = str(s)
    if s in ('True', 'true'):
        return True
    if s in ('False', 'false'):
        return False
    raise ValueError('Cannot convert {0!r} to a boolean'.format(s))
+
+
@operation
def config(**kwargs):
    """Prepare per-deployment helm configuration.

    Creates the deployment config directory, writes TLS client certs when
    'tls-enable' is set, renders the component's helm value file from the
    'config' / 'config-url' / 'runtime-config' node properties, and runs
    `helm init --client-only` against the configured stable repo.

    Raises:
        NonRecoverableError: when both 'config' and 'config-url' are set,
            or when `helm init` fails.
    """
    configJson = str(ctx.node.properties['config'])
    configJsonUrl = str(ctx.node.properties['config-url'])
    runtime_config = str(ctx.node.properties['runtime-config'])  # JSON text
    componentName = ctx.node.properties['component-name']
    config_dir_root = str(ctx.node.properties['config-dir'])
    stable_repo_url = str(ctx.node.properties['stable-repo-url'])
    ctx.logger.debug("debug " + configJson + runtime_config)

    # One directory per deployment keeps value/cert files isolated.
    config_dir = config_dir_root + str(ctx.deployment.id)
    try:
        os.makedirs(config_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    ctx.logger.debug('tls-enable type ' +
                     str(type(str_to_bool(ctx.node.properties['tls-enable']))))

    # Materialize the TLS client certificates for later helm --tls calls.
    # `with` guarantees the handles are closed (the original leaked them
    # on write errors).
    if str_to_bool(ctx.node.properties['tls-enable']):
        ctx.logger.debug('tls enable')
        for file_name, prop in (('ca.cert.pem', 'ca'),
                                ('helm.cert.pem', 'cert'),
                                ('helm.key.pem', 'key')):
            with open(config_dir + '/' + file_name, 'w+') as cert_file:
                cert_file.write(ctx.node.properties[prop])
    else:
        ctx.logger.debug('tls disable')

    # Render the helm value.yaml for this component.
    configPath = config_dir_root + str(ctx.deployment.id) + '/' + \
        componentName + '.yaml'
    ctx.logger.debug(configPath)

    configObj = {}
    if configJson == '' and configJsonUrl == '':
        ctx.logger.debug("Will use default HELM value")
    elif configJson == '' and configJsonUrl != '':
        response = urllib2.urlopen(configJsonUrl)
        configObj = json.load(response)
    elif configJson != '' and configJsonUrl == '':
        configObj = json.loads(configJson)
    else:
        # Both inline config and config-url were provided - ambiguous input.
        raise NonRecoverableError("Unable to get Json config input")

    # Overlay runtime configuration on top of the static configuration.
    ctx.logger.debug("debug check runtime config")
    if runtime_config == '':
        ctx.logger.debug("there is no runtime config value")
    else:
        mergedict(configObj, json.loads(runtime_config))

    with open(configPath, 'w') as outfile:
        yaml.safe_dump(configObj, outfile, default_flow_style=False)

    output = execute_command('helm init --client-only --stable-repo-url ' +
                             stable_repo_url)
    if output is False:
        raise NonRecoverableError("helm init failed")
+
+
+
+
@operation
def start(**kwargs):
    """Install the component's helm chart into the target namespace.

    Retries the operation after 5 seconds if `helm install` fails, then
    records the release's values and history in runtime properties.
    """
    chartRepo = ctx.node.properties['chart-repo-url']
    componentName = ctx.node.properties['component-name']
    chartVersion = ctx.node.properties['chart-version']
    config_dir_root = str(ctx.node.properties['config-dir'])
    configPath = config_dir_root + str(ctx.deployment.id) + '/' + \
        componentName + '.yaml'
    namespace = ctx.node.properties['namespace']
    configJson = str(ctx.node.properties['config'])
    configJsonUrl = str(ctx.node.properties['config-url'])
    runtimeconfigJson = str(ctx.node.properties['runtime-config'])

    chart = chartRepo + "/" + componentName + "-" + chartVersion + ".tgz"
    chartName = namespace + "-" + componentName

    # Pass -f only when config() rendered a custom value file.
    has_custom_values = not (configJson == '' and runtimeconfigJson == ''
                             and configJsonUrl == '')
    if has_custom_values:
        installCommand = ('helm install ' + chart + ' --name ' + chartName +
                          ' --namespace ' + namespace + ' -f ' + configPath +
                          tiller_host() + tls())
    else:
        installCommand = ('helm install ' + chart + ' --name ' + chartName +
                          ' --namespace ' + namespace +
                          tiller_host() + tls())

    if execute_command(installCommand) is False:
        return ctx.operation.retry(
            message='helm install failed, re-try after 5 second ',
            retry_after=5)

    get_current_helm_value(chartName)
    get_helm_history(chartName)
+
@operation
def stop(**kwargs):
    """Delete the component's helm release, then its local config directory.

    Raises:
        NonRecoverableError: when `helm delete` fails. The per-deployment
            config directory is kept in that case so a retry still has the
            value file and TLS certificates. (The original removed the
            directory before checking the delete result, destroying the
            material a retry would need.)
    """
    namespace = ctx.node.properties['namespace']
    component = ctx.node.properties['component-name']
    chartName = namespace + "-" + component
    config_dir_root = str(ctx.node.properties['config-dir'])

    command = 'helm delete --purge ' + chartName + tiller_host() + tls()
    output = execute_command(command)
    if output is False:
        raise NonRecoverableError("helm delete failed")

    # Delete succeeded - the local config is no longer needed.
    config_dir = config_dir_root + str(ctx.deployment.id)
    shutil.rmtree(config_dir)
+
@operation
def upgrade(**kwargs):
    """Upgrade the deployed release to a new chart version and/or config.

    kwargs:
        config: helm values to apply (dict), or '' to keep current values.
        chart_repo: repository URL holding the chart .tgz.
        chart_version: chart version to upgrade to.

    Retries after 5 seconds if `helm upgrade` fails; on success refreshes
    the cached helm values and history.
    """
    # (The original assigned componentName twice; once is enough.)
    componentName = ctx.node.properties['component-name']
    namespace = ctx.node.properties['namespace']
    config_dir_root = str(ctx.node.properties['config-dir'])
    configPath = config_dir_root + str(ctx.deployment.id) + '/' + \
        componentName + '.yaml'
    configJson = kwargs['config']
    chartRepo = kwargs['chart_repo']
    chartVersion = kwargs['chart_version']

    ctx.logger.debug('debug ' + str(configJson))
    chartName = namespace + "-" + componentName
    chart = chartRepo + "/" + componentName + "-" + chartVersion + ".tgz"

    if str(configJson) == '':
        # No new values supplied - reuse whatever the release already has.
        upgradeCommand = 'helm upgrade ' + chartName + ' ' + chart + \
            tiller_host() + tls()
    else:
        # Persist the new values and feed them to helm via -f.
        with open(configPath, 'w') as outfile:
            yaml.safe_dump(configJson, outfile, default_flow_style=False)
        upgradeCommand = 'helm upgrade ' + chartName + ' ' + chart + \
            ' -f ' + configPath + tiller_host() + tls()

    if execute_command(upgradeCommand) is False:
        return ctx.operation.retry(
            message='helm upgrade failed, re-try after 5 second ',
            retry_after=5)
    get_current_helm_value(chartName)
    get_helm_history(chartName)
+
@operation
def rollback(**kwargs):
    """Roll the release back to a previous revision.

    kwargs:
        revision: helm revision number to roll back to (int or str).

    Retries after 5 seconds if `helm rollback` fails; on success refreshes
    the cached helm values and history.
    """
    componentName = ctx.node.properties['component-name']
    namespace = ctx.node.properties['namespace']
    # Coerce to str so an integer revision does not break the string
    # concatenation below (the original required callers to pre-convert).
    revision = str(kwargs['revision'])

    chartName = namespace + "-" + componentName
    rollbackCommand = 'helm rollback ' + chartName + ' ' + revision + \
        tiller_host() + tls()
    if execute_command(rollbackCommand) is False:
        return ctx.operation.retry(
            message='helm rollback failed, re-try after 5 second ',
            retry_after=5)
    get_current_helm_value(chartName)
    get_helm_history(chartName)
diff --git a/helm/plugin/tests/__init__.py b/helm/plugin/tests/__init__.py
new file mode 100644
index 0000000..749f68f
--- /dev/null
+++ b/helm/plugin/tests/__init__.py
@@ -0,0 +1,14 @@
+########
+# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
diff --git a/helm/plugin/tests/blueprint/blueprint.yaml b/helm/plugin/tests/blueprint/blueprint.yaml
new file mode 100644
index 0000000..2588e8d
--- /dev/null
+++ b/helm/plugin/tests/blueprint/blueprint.yaml
@@ -0,0 +1,42 @@
+# DSL version, should appear in the main blueprint.yaml
+# and may appear in other imports. In such case, the versions must match
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+ # importing cloudify related types, plugins, workflow, etc...
+ # to speed things up, it is possible downloading this file,
+ # including it in the blueprint directory and importing it
+ # instead.
+ - http://www.getcloudify.org/spec/cloudify/4.1.1/types.yaml
+ # relative import of plugin.yaml that resides in the blueprint directory
+ - plugin/test_plugin.yaml
+
+inputs:
+ # example input that could be injected by test
+ test_input:
+ description: an input for the test
+ default: default_test_input
+
+node_templates:
+ # defining a single node template that will serve as our test node
+ test_node_template:
+ # using base cloudify type
+ type: cloudify.nodes.Root
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ start:
+ # here we map the single plugin task to the start operation
+ # of the cloudify.interfaces.lifecycle interface
+ implementation: plugin_name.plugin.tasks.my_task
+ inputs:
+ # my_task accepts a single property named
+ # some property. Here we inject this property
+ # from the input provided by the test
+ # (or 'default_test_input' if no input was provided)
+ some_property: { get_input: test_input }
+
+outputs:
+  # example output that could be used to simplify assertions in tests
+ test_output:
+ description: an output for the test
+ value: { get_attribute: [test_node_template, some_property] }
diff --git a/helm/plugin/tests/blueprint/plugin/test_plugin.yaml b/helm/plugin/tests/blueprint/plugin/test_plugin.yaml
new file mode 100644
index 0000000..9701318
--- /dev/null
+++ b/helm/plugin/tests/blueprint/plugin/test_plugin.yaml
@@ -0,0 +1,20 @@
+plugins:
+ # Name could be anything, this name is what appears on the beginning of operation
+ # mappings.
+ plugin_name:
+ # Could be 'central_deployment_agent' or 'host_agent'.
+ # If 'central_deployment_agent', this plugin will be executed on the
+  # deployment dedicated agent, otherwise it will be executed on the host agent.
+ # We set it the 'central_deployment_agent' here because 'host_agent' plugins should
+ # be contained in a host and this is not required for testing purposes
+ executor: central_deployment_agent
+
+ # Setting install to false in testing environment. In the non-test plugin definition
+ # this property could be omitted usually (its default is true), in which case
+ # the source property should be set
+ install: false
+
+  # source: URL to archive containing the plugin or name of directory containing
+  # the plugin if it is included in the blueprint directory under the
+  # "plugins" directory. Not required in testing environments as the plugin
+  # need not be installed on any agent
diff --git a/helm/plugin/tests/test_plugin.py b/helm/plugin/tests/test_plugin.py
new file mode 100644
index 0000000..be0882f
--- /dev/null
+++ b/helm/plugin/tests/test_plugin.py
@@ -0,0 +1,47 @@
+########
+# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+
+
+from os import path
+import unittest
+
+from cloudify.test_utils import workflow_test
+
+
class TestPlugin(unittest.TestCase):
    """Runs the sample blueprint through a local in-memory Cloudify env."""

    @workflow_test(path.join('blueprint', 'blueprint.yaml'),
                   resources_to_copy=[(path.join('blueprint', 'plugin',
                                                 'test_plugin.yaml'),
                                       'plugin')],
                   inputs={'test_input': 'new_test_input'})
    def test_my_task(self, cfy_local):
        # execute install workflow
        """Run the install workflow and verify node state and outputs.

        :param cfy_local: local workflow environment injected by the
            @workflow_test decorator.
        """
        cfy_local.execute('install', task_retries=0)

        # extract single node instance
        instance = cfy_local.storage.get_node_instances()[0]

        # assert runtime properties is properly set in node instance
        self.assertEqual(instance.runtime_properties['some_property'],
                         'new_test_input')

        # assert deployment outputs are ok
        self.assertDictEqual(cfy_local.outputs(),
                             {'test_output': 'new_test_input'})
diff --git a/helm/plugin/workflows.py b/helm/plugin/workflows.py
new file mode 100644
index 0000000..d341bf7
--- /dev/null
+++ b/helm/plugin/workflows.py
@@ -0,0 +1,64 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright (c) 2018 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+from cloudify.decorators import workflow
+from cloudify.workflows import ctx
+from cloudify.exceptions import NonRecoverableError
+import urllib2
+import json
+
@workflow
def upgrade(node_instance_id, config_json, config_json_url,
            chartVersion, chartRepo, **kwargs):
    """Run the 'upgrade' operation on one node instance.

    Resolves the helm value config either inline (config_json) or from a
    URL (config_json_url) - supplying both is an error - and passes it to
    the node's upgrade operation together with the chart coordinates.

    Raises:
        NonRecoverableError: when node_instance_id is empty, or when both
            config_json and config_json_url are provided.
    """
    # Validate the id before dereferencing it (the original called
    # ctx.get_node_instance first, so an empty id failed with an obscure
    # error before this guard could fire).
    if not node_instance_id:
        raise NonRecoverableError(
            'No such node_instance_id in deployment: {0}.'.format(
                node_instance_id))
    node_instance = ctx.get_node_instance(node_instance_id)

    op_kwargs = {}
    if config_json == '' and config_json_url == '':
        op_kwargs['config'] = config_json
    elif config_json == '' and config_json_url != '':
        response = urllib2.urlopen(config_json_url)
        op_kwargs['config'] = json.load(response)
    elif config_json != '' and config_json_url == '':
        op_kwargs['config'] = config_json
    else:
        raise NonRecoverableError("Unable to get Json config input")

    op_kwargs['chart_version'] = str(chartVersion)
    op_kwargs['chart_repo'] = str(chartRepo)
    node_instance.execute_operation(operation='upgrade', kwargs=op_kwargs)
+
+
@workflow
def rollback(node_instance_id, revision, **kwargs):
    """Run the 'rollback' operation on one node instance.

    Raises:
        NonRecoverableError: when node_instance_id is empty.
    """
    # Validate before dereferencing (the original resolved the instance
    # first, making this guard unreachable in the failure case it names).
    if not node_instance_id:
        raise NonRecoverableError(
            'No such node_instance_id in deployment: {0}.'.format(
                node_instance_id))
    node_instance = ctx.get_node_instance(node_instance_id)

    node_instance.execute_operation(operation='rollback',
                                    kwargs={'revision': str(revision)})