From dbb3ba15b358043f9e45d8dd211ce1bd970fb08e Mon Sep 17 00:00:00 2001
From: Bartek Grzybowski
Date: Wed, 25 Sep 2019 11:21:42 +0200
Subject: Automate vCPE closed loop policy pushing

Added a library routine to set up the CL policy in an idempotent
fashion. The description of the corresponding manual CL policy pushing
step was removed from the documentation.

Change-Id: I1fad5d71481252ce803dd58c6ccbbcfa0a4d246f
Signed-off-by: Bartek Grzybowski
Issue-ID: INT-1267
---
 docs/docs_vCPE.rst                                 |   9 +-
 test/vcpe/preload_templates/operational.vcpe.json  |   4 +
 .../preload_templates/operational.vcpe.json.txt    |   4 -
 .../operational.vcpe.pap.json.txt                  |   8 --
 test/vcpe/vcpe.py                                  |   5 +
 test/vcpe/vcpecommon.py                            | 102 ++++++++++++++++++++-
 6 files changed, 110 insertions(+), 22 deletions(-)
 create mode 100644 test/vcpe/preload_templates/operational.vcpe.json
 delete mode 100644 test/vcpe/preload_templates/operational.vcpe.json.txt
 delete mode 100644 test/vcpe/preload_templates/operational.vcpe.pap.json.txt

diff --git a/docs/docs_vCPE.rst b/docs/docs_vCPE.rst
index c6534b821..48b56c253 100644
--- a/docs/docs_vCPE.rst
+++ b/docs/docs_vCPE.rst
@@ -162,14 +162,7 @@ Here are the main steps to run the use case in Integration lab environment, wher
 
    ~/integration/test/vcpe# ~/oom/kubernetes/robot/demo-k8s.sh onap heatbridge vcpe_vfmodule_e2744f48729e4072b20b_201811262136 d8914ef3-3fdb-4401-adfe-823ee75dc604 vCPEvGMUX 10.0.101.21
 
-18. Push vCPE closed loop Policy. Copy the two operational policy from vcpe/preload_templates to Robot container and then run the following two commands inside Robot container. You can find more details in JIRA INT-1089 - Create vCPE closed loop policy and push to policy engine
-
-::
-
-   curl -k --silent --user 'healthcheck:zb!XztG34' -X POST "https://policy-api:6969/policy/api/v1/policytypes/onap.policies.controlloop.Operational/versions/1.0.0/policies" -H "Accept: application/json" -H "Content-Type: application/json" -d @operational.vcpe.json.txt
-   curl --silent -k --user 'healthcheck:zb!XztG34' -X POST "https://policy-pap:6969/policy/pap/v1/pdps/policies" -H "Accept: application/json" -H "Content-Type: application/json" -d @operational.vcpe.pap.json.txt
-
-19. Start closed loop test by triggering packet drop VES event, and monitor if vGMUX is restarting. You may need to run the command twice if the first run fails
+18. Start closed loop test by triggering packet drop VES event, and monitor if vGMUX is restarting. You may need to run the command twice if the first run fails
 
 ::
 
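For reference, the two curl calls removed above are plain HTTPS POSTs against policy-api and policy-pap. A minimal requests-based sketch of the same manual push (assumptions: the in-cluster service names policy-api and policy-pap resolve from where this runs, and operational.vcpe.json sits in the current directory)::

   import requests

   requests.packages.urllib3.disable_warnings()
   auth = ('healthcheck', 'zb!XztG34')
   headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}

   # Create the operational policy (first removed curl call)
   with open('operational.vcpe.json') as f:
       created = requests.post(
           'https://policy-api:6969/policy/api/v1/policytypes/'
           'onap.policies.controlloop.Operational/versions/1.0.0/policies',
           auth=auth, headers=headers, data=f.read(), verify=False)
   print(created.status_code)

   # Deploy it to the PDP group (second removed curl call); the payload
   # mirrors the deleted operational.vcpe.pap.json.txt
   deployed = requests.post(
       'https://policy-pap:6969/policy/pap/v1/pdps/policies',
       auth=auth, headers=headers, verify=False,
       json={'policies': [{'policy-id': 'operational.vcpe', 'policy-version': 1}]})
   print(deployed.status_code)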
diff --git a/test/vcpe/preload_templates/operational.vcpe.json b/test/vcpe/preload_templates/operational.vcpe.json
new file mode 100644
index 000000000..f5e9ead81
--- /dev/null
+++ b/test/vcpe/preload_templates/operational.vcpe.json
@@ -0,0 +1,4 @@
+{
+  "policy-id": "operational.vcpe",
+  "content": "controlLoop%3A%0D%0A++version%3A+2.0.0%0D%0A++controlLoopName%3A+ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e%0D%0A++trigger_policy%3A+unique-policy-id-1-restart%0D%0A++timeout%3A+3600%0D%0A++abatement%3A+true%0D%0A+%0D%0Apolicies%3A%0D%0A++-+id%3A+unique-policy-id-1-restart%0D%0A++++name%3A+Restart+the+VM%0D%0A++++description%3A%0D%0A++++actor%3A+APPC%0D%0A++++recipe%3A+Restart%0D%0A++++target%3A%0D%0A++++++type%3A+VM%0D%0A++++retry%3A+3%0D%0A++++timeout%3A+1200%0D%0A++++success%3A+final_success%0D%0A++++failure%3A+final_failure%0D%0A++++failure_timeout%3A+final_failure_timeout%0D%0A++++failure_retries%3A+final_failure_retries%0D%0A++++failure_exception%3A+final_failure_exception%0D%0A++++failure_guard%3A+final_failure_guard"
+}
diff --git a/test/vcpe/preload_templates/operational.vcpe.json.txt b/test/vcpe/preload_templates/operational.vcpe.json.txt
deleted file mode 100644
index f5e9ead81..000000000
--- a/test/vcpe/preload_templates/operational.vcpe.json.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "policy-id": "operational.vcpe",
-  "content": "controlLoop%3A%0D%0A++version%3A+2.0.0%0D%0A++controlLoopName%3A+ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e%0D%0A++trigger_policy%3A+unique-policy-id-1-restart%0D%0A++timeout%3A+3600%0D%0A++abatement%3A+true%0D%0A+%0D%0Apolicies%3A%0D%0A++-+id%3A+unique-policy-id-1-restart%0D%0A++++name%3A+Restart+the+VM%0D%0A++++description%3A%0D%0A++++actor%3A+APPC%0D%0A++++recipe%3A+Restart%0D%0A++++target%3A%0D%0A++++++type%3A+VM%0D%0A++++retry%3A+3%0D%0A++++timeout%3A+1200%0D%0A++++success%3A+final_success%0D%0A++++failure%3A+final_failure%0D%0A++++failure_timeout%3A+final_failure_timeout%0D%0A++++failure_retries%3A+final_failure_retries%0D%0A++++failure_exception%3A+final_failure_exception%0D%0A++++failure_guard%3A+final_failure_guard"
-}
diff --git a/test/vcpe/preload_templates/operational.vcpe.pap.json.txt b/test/vcpe/preload_templates/operational.vcpe.pap.json.txt
deleted file mode 100644
index 22c50e7dd..000000000
--- a/test/vcpe/preload_templates/operational.vcpe.pap.json.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "policies": [
-    {
-      "policy-id": "operational.vcpe",
-      "policy-version": 1
-    }
-  ]
-}
diff --git a/test/vcpe/vcpe.py b/test/vcpe/vcpe.py
index f8c9b81aa..dd286bd30 100755
--- a/test/vcpe/vcpe.py
+++ b/test/vcpe/vcpe.py
@@ -159,6 +159,11 @@ def closed_loop(lossrate=0):
     nodes = ['brg', 'mux']
     logger = logging.getLogger('__name__')
     vcpecommon = VcpeCommon(nodes)
+
+    logger.info('Setting up closed loop policy')
+    policy_template_file = vcpecommon.find_file('operational.vcpe', 'json', 'preload_templates')
+    vcpecommon.set_closed_loop_policy(policy_template_file)
+
     logger.info('Cleaning up vGMUX data reporting settings')
     vcpecommon.del_vgmux_ves_mode()
     time.sleep(2)
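The content field in operational.vcpe.json above is a URL-encoded YAML control loop definition ('+' for spaces, %XX escapes, CR/LF as %0D%0A). A short sketch for inspecting it in readable form (assuming the file is in the current directory; unquote_plus lives in urllib on Python 2 and urllib.parse on Python 3)::

   import json

   try:
       from urllib.parse import unquote_plus  # Python 3
   except ImportError:
       from urllib import unquote_plus  # Python 2

   with open('operational.vcpe.json') as f:
       policy = json.load(f)

   # Prints the decoded control loop YAML, e.g. the APPC Restart recipe
   # targeting the vGMUX VM with its retry/timeout settings
   print(unquote_plus(policy['content']))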
diff --git a/test/vcpe/vcpecommon.py b/test/vcpe/vcpecommon.py
index bb83c2dcb..78085fc1f 100755
--- a/test/vcpe/vcpecommon.py
+++ b/test/vcpe/vcpecommon.py
@@ -56,9 +56,10 @@ class VcpeCommon:
     }
 
     #############################################################################
-    # set name of sdnc controller pod, prefix is taken from helm environment name
+    # Set the name of ONAP's k8s namespace and the sdnc controller pod
     # CHANGEME part
-    sdnc_controller_pod = 'dev-sdnc-sdnc-0'
+    onap_namespace = 'dev'
+    sdnc_controller_pod = '-'.join([onap_namespace, 'sdnc-sdnc-0'])
 
     template_variable_symbol = '${'
     cpe_vm_prefix = 'zdcpe'
@@ -196,6 +197,17 @@
         self.vpp_api_userpass = ('admin', 'admin')
         self.vpp_ves_url= 'http://{0}:8183/restconf/config/vesagent:vesagent'
 
+        #############################################################################################
+        # POLICY urls
+        self.policy_userpass = ('healthcheck', 'zb!XztG34')
+        self.policy_headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
+        self.policy_api_url = 'https://{0}:6969/policy/api/v1/policytypes/onap.policies.controlloop.Operational/versions/1.0.0/policies'
+        self.policy_pap_get_url = 'https://{0}:6969/policy/pap/v1/pdps'
+        self.policy_pap_json = {'policies': [{'policy-id': 'operational.vcpe'}]}
+        self.policy_pap_post_url = self.policy_pap_get_url + '/policies'
+        self.policy_api_service_name = 'policy-api'
+        self.policy_pap_service_name = 'policy-pap'
+
     def heatbridge(self, openstack_stack_name, svc_instance_uuid):
         """
         Add vserver information to AAI
@@ -323,6 +335,74 @@
             self.logger.error("Can't get subnet info from network name: " + network_name)
         return False
 
+    def set_closed_loop_policy(self, policy_template_file):
+        # Gather the policy services' cluster IPs
+        p_api_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_api_service_name)
+        p_pap_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_pap_service_name)
+
+        # Read the policy JSON from file
+        with open(policy_template_file) as f:
+            try:
+                policy_json = json.load(f)
+            except ValueError:
+                self.logger.error(policy_template_file + " doesn't seem to contain valid JSON data")
+                sys.exit()
+
+        # Check whether the policy is already applied
+        requests.packages.urllib3.disable_warnings()
+        policy_exists_req = requests.get(self.policy_pap_get_url.format(
+                                p_pap_cluster_ip), auth=self.policy_userpass,
+                                verify=False, headers=self.policy_headers)
+        if policy_exists_req.status_code != 200:
+            self.logger.error('Failure in checking CL policy existence. '
+                              'Policy-pap responded with HTTP code {0}'.format(
+                                  policy_exists_req.status_code))
+            sys.exit()
+
+        try:
+            policy_exists_json = policy_exists_req.json()
+        except ValueError as e:
+            self.logger.error('Policy-pap response is not valid JSON: ' + str(e))
+            sys.exit()
+
+        try:
+            assert policy_exists_json['groups'][0]['pdpSubgroups'] \
+                   [1]['policies'][0]['name'] != 'operational.vcpe'
+        except AssertionError:
+            self.logger.info('vCPE closed loop policy already exists, not applying')
+            return
+        except (IndexError, KeyError):
+            pass  # policy doesn't exist yet
+
+        # Create the policy
+        policy_create_req = requests.post(self.policy_api_url.format(
+                                p_api_cluster_ip), auth=self.policy_userpass,
+                                json=policy_json, verify=False,
+                                headers=self.policy_headers)
+        if policy_create_req.status_code != 200:
+            self.logger.error('Failed creating policy. Policy-api responded'
+                              ' with HTTP code {0}'.format(policy_create_req.status_code))
+            sys.exit()
+
+        # Get the policy version from the policy-api response
+        try:
+            policy_version = json.loads(policy_create_req.text)['policy-version']
+        except (KeyError, ValueError):
+            self.logger.error('Policy API response not understood:')
+            self.logger.debug('\n' + str(policy_create_req.text))
+            sys.exit()
+        # Deploy the policy through Policy PAP
+        self.policy_pap_json['policies'][0]['policy-version'] = policy_version
+        policy_insert_req = requests.post(self.policy_pap_post_url.format(
+                                p_pap_cluster_ip), auth=self.policy_userpass,
+                                json=self.policy_pap_json, verify=False,
+                                headers=self.policy_headers)
+        if policy_insert_req.status_code != 200:
+            self.logger.error('Policy PAP request failed with HTTP code '
+                              '{0}'.format(policy_insert_req.status_code))
+            sys.exit()
+        self.logger.info('Successfully pushed closed loop policy')
+
     def is_node_in_aai(self, node_type, node_uuid):
         key = None
         search_node_type = None
@@ -463,6 +543,24 @@
             vm_ip[vm] = self.oom_so_sdnc_aai_ip
         return vm_ip
 
+    def get_k8s_service_cluster_ip(self, service):
+        """
+        Returns cluster IP for a given service
+        :param service: name of the service
+        :return: cluster ip
+        """
+        config.load_kube_config()
+        api = client.CoreV1Api()
+        kslogger = logging.getLogger('kubernetes')
+        kslogger.setLevel(logging.INFO)
+        try:
+            resp = api.read_namespaced_service(service, self.onap_namespace)
+        except client.rest.ApiException as e:
+            self.logger.error('Error while making k8s API request: ' + e.body)
+            sys.exit()
+
+        return resp.spec.cluster_ip
+
     def extract_vm_ip_as_dict(self, novalist_results, net_addr, net_addr_len):
         vm_ip_dict = {}
         for line in novalist_results.split('\n'):
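set_closed_loop_policy() above is what closed_loop() in vcpe.py now calls before each test run, and it is written to be idempotent: it returns early when policy-pap already reports operational.vcpe as deployed. A minimal standalone driver sketch (assuming a deployed vCPE environment and that it runs from test/vcpe in the integration repo, so find_file() can locate the template)::

   import logging

   from vcpecommon import VcpeCommon

   logging.basicConfig(level=logging.INFO)

   common = VcpeCommon(['brg', 'mux'])
   template = common.find_file('operational.vcpe', 'json', 'preload_templates')
   # Safe to run repeatedly; a re-run just logs that the policy exists
   common.set_closed_loop_policy(template)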
-- 
cgit 1.2.3-korg
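The new get_k8s_service_cluster_ip() helper is a thin wrapper around the official kubernetes Python client. The same lookup as a standalone sketch (assumptions: the kubernetes package is installed, ~/.kube/config points at the ONAP cluster, and the services live in the 'dev' namespace configured above)::

   import logging
   import sys

   from kubernetes import client, config

   def cluster_ip(service, namespace='dev'):
       """Return the ClusterIP of a k8s service, exiting on API errors."""
       config.load_kube_config()  # reads ~/.kube/config
       api = client.CoreV1Api()
       try:
           svc = api.read_namespaced_service(service, namespace)
       except client.rest.ApiException as e:
           logging.error('k8s API request failed: %s', e.body)
           sys.exit(1)
       return svc.spec.cluster_ip

   if __name__ == '__main__':
       print(cluster_ip('policy-api'))
       print(cluster_ip('policy-pap'))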