aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBartek Grzybowski <b.grzybowski@partner.samsung.com>2019-10-16 15:28:23 +0200
committerBartek Grzybowski <b.grzybowski@partner.samsung.com>2019-10-16 15:28:23 +0200
commit9018a8458ca5cd3a39c9f2b6fb7eab8bc9284ac9 (patch)
treedc75b6c79d884d68093bc64fb7e48212753798ba
parentf2d8bca52e0ff36f3b114bc557e9b88c8aad6098 (diff)
Automate SDNC ip pool insertion into database
This patch automates SDNC ip pool generation in sdnctl database in the scope of vcpe.py 'init' stage. Change-Id: I6322ff2dadb069991be0eddbb0cf415baa7984f6 Signed-off-by: Bartek Grzybowski <b.grzybowski@partner.samsung.com> Issue-ID: TEST-222
-rw-r--r--docs/docs_vCPE.rst29
-rwxr-xr-xtest/vcpe/config_sdnc_so.py13
-rwxr-xr-xtest/vcpe/vcpe.py1
-rwxr-xr-xtest/vcpe/vcpecommon.py36
4 files changed, 62 insertions, 17 deletions
diff --git a/docs/docs_vCPE.rst b/docs/docs_vCPE.rst
index 25c4647c2..cff5f3f27 100644
--- a/docs/docs_vCPE.rst
+++ b/docs/docs_vCPE.rst
@@ -58,20 +58,15 @@ Here are the main steps to run the use case in Integration lab environment, wher
ip route add 10.3.0.0/24 via 10.0.101.10 dev ens3
-5. Initialize SDNC ip pool by running command from Rancher node
-::
-
- kubectl -n onap exec -it dev-sdnc-sdnc-0 -- /opt/sdnc/bin/addIpAddresses.sh VGW 10.5.0 22 250
-
-6. Install Python and other Python libraries
+5. Install Python and other Python libraries
::
integration/test/vcpe/bin/setup.sh
-7. Change the Openstack env parameters and one customer service related parameter in vcpecommon.py
+6. Change the Openstack env parameters and one customer service related parameter in vcpecommon.py
::
@@ -99,25 +94,25 @@ Here are the main steps to run the use case in Integration lab environment, wher
# CHANGEME: vgw_VfModuleModelInvariantUuid is in rescust service csar, open service template with filename like service-VcpesvcRescust1118-template.yml and look for vfModuleModelInvariantUUID under groups vgw module metadata.
self.vgw_VfModuleModelInvariantUuid = 'xxxxxxxxxxxxxxx'
-8. Initialize vcpe
+7. Initialize vcpe
::
vcpe.py init
-9. Run a command from Rancher node to insert vcpe customer service workflow entry in SO catalogdb. You should be able to see a sql command printed out from the above step output at the end, and use that sql command to replace the sample sql command below (inside the double quote) and run it from Rancher node:
+8. Run a command from Rancher node to insert vcpe customer service workflow entry in SO catalogdb. You should be able to see a sql command printed out from the above step output at the end, and use that sql command to replace the sample sql command below (inside the double quote) and run it from Rancher node:
::
kubectl exec dev-mariadb-galera-mariadb-galera-0 -- mysql -uroot -psecretpassword catalogdb -e "INSERT INTO service_recipe (ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, SERVICE_PARAM_XSD, RECIPE_TIMEOUT, SERVICE_TIMEOUT_INTERIM, CREATION_TIMESTAMP, SERVICE_MODEL_UUID) VALUES ('createInstance','1','vCPEResCust 2019-06-03 _04ba','/mso/async/services/CreateVcpeResCustService',NULL,181,NULL, NOW(),'6c4a469d-ca2c-4b02-8cf1-bd02e9c5a7ce')"
-10. Run Robot to create and distribute for vCPE customer service. This step assumes step 1 has successfully distributed all vcpe models except customer service model
+9. Run Robot to create and distribute for vCPE customer service. This step assumes step 1 has successfully distributed all vcpe models except customer service model
::
ete-k8s.sh onap distributevCPEResCust
-11. Manually copy vCPE customer service csar (starting with service-Vcperescust) under Robot container /tmp/csar directory to Rancher vcpe/csar directory, now you should have these files:
+10. Manually copy vCPE customer service csar (starting with service-Vcperescust) under Robot container /tmp/csar directory to Rancher vcpe/csar directory, now you should have these files:
::
@@ -130,33 +125,33 @@ Here are the main steps to run the use case in Integration lab environment, wher
-rw-r--r-- 1 root root 78156 Jun 26 11:28 service-Demovcpevgw-csar.csar
-rw-r--r-- 1 root root 83892 Jun 26 11:28 service-Vcperescust20190625D996-csar.csar
-12. Instantiate vCPE infra services
+11. Instantiate vCPE infra services
::
vcpe.py infra
-13. From Rancher node run vcpe healthcheck command to check connectivity from sdnc to brg and gmux, and vpp configuration of brg and gmux. Write down BRG MAC address printed out at the last line
+12. From Rancher node run vcpe healthcheck command to check connectivity from sdnc to brg and gmux, and vpp configuration of brg and gmux. Write down BRG MAC address printed out at the last line
::
healthcheck-k8s.py --namespace <namespace name> --environment <env name>
-14. Instantiate vCPE customer service. Input the BRG MAC when prompt
+13. Instantiate vCPE customer service. Input the BRG MAC when prompted
::
vcpe.py customer
-15. Update libevel.so in vGMUX VM and restart the VM. This allows vGMUX to send events to VES collector in close loop test. See tutorial wiki for details
+14. Update libevel.so in vGMUX VM and restart the VM. This allows vGMUX to send events to VES collector in close loop test. See tutorial wiki for details
-16. Run heatbridge. The heatbridge command usage: demo-k8s.sh <namespace> heatbridge <stack_name> <service_instance_id> <service> <oam-ip-address>, please refer to vCPE tutorial page on how to fill in those paraemters. See an example as following:
+15. Run heatbridge. The heatbridge command usage: demo-k8s.sh <namespace> heatbridge <stack_name> <service_instance_id> <service> <oam-ip-address>, please refer to vCPE tutorial page on how to fill in those parameters. See the following example:
::
~/integration/test/vcpe# ~/oom/kubernetes/robot/demo-k8s.sh onap heatbridge vcpe_vfmodule_e2744f48729e4072b20b_201811262136 d8914ef3-3fdb-4401-adfe-823ee75dc604 vCPEvGMUX 10.0.101.21
-17. Start closed loop test by triggering packet drop VES event, and monitor if vGMUX is restarting. You may need to run the command twice if the first run fails
+16. Start closed loop test by triggering packet drop VES event, and monitor if vGMUX is restarting. You may need to run the command twice if the first run fails
::
diff --git a/test/vcpe/config_sdnc_so.py b/test/vcpe/config_sdnc_so.py
index 46d4c1c4c..13ac47bee 100755
--- a/test/vcpe/config_sdnc_so.py
+++ b/test/vcpe/config_sdnc_so.py
@@ -81,3 +81,16 @@ def insert_customer_service_to_so(vcpecommon):
'Please manually run the following sql command in SO catalogdb database to insert customer service recipe')
logger.info('\n'.join(cmds))
#vcpecommon.execute_cmds_so_db(cmds)
+
+
+def insert_sdnc_ip_pool(vcpecommon):
+    """Populate SDNC's IPV4_ADDRESS_POOL table with VGW addresses.
+
+    Automates the previously manual addIpAddresses.sh step: derives the
+    network prefix (first three octets) from the mux_gw preload network
+    config and inserts one AVAILABLE row of resource type VGW per host
+    address, all in a single INSERT statement.
+
+    :param vcpecommon: VcpeCommon instance providing preload_network_config
+        and execute_cmds_mariadb
+    """
+    logger = logging.getLogger(__name__)
+    logger.info('Inserting SDNC ip pool to SDNC DB')
+    cmds = []
+    # Get the VGWs network address
+    vgw_net = '.'.join(vcpecommon.preload_network_config['mux_gw'][0].split('.')[:3])
+    row_values = []
+    # Prepare single INSERT statement with all IP values
+    # NOTE(review): range(22,250) inserts .22 through .249; the shell command
+    # this replaces took arguments "22 250" -- confirm whether .250 is meant
+    # to be included.
+    for ip in range(22,250):
+        row_values.append("('', 'VGW', 'AVAILABLE','{0}.{1}')".format(vgw_net,ip))
+    cmds.append("INSERT INTO IPV4_ADDRESS_POOL VALUES" + ', '.join(row_values) + ';')
+    vcpecommon.execute_cmds_mariadb(cmds)
diff --git a/test/vcpe/vcpe.py b/test/vcpe/vcpe.py
index 49fc0e488..9c533a065 100755
--- a/test/vcpe/vcpe.py
+++ b/test/vcpe/vcpe.py
@@ -182,6 +182,7 @@ def closed_loop(lossrate=0):
def init_so_sdnc():
logger = logging.getLogger('__name__')
vcpecommon = VcpeCommon()
+ config_sdnc_so.insert_sdnc_ip_pool(vcpecommon)
config_sdnc_so.insert_customer_service_to_so(vcpecommon)
#config_sdnc_so.insert_customer_service_to_sdnc(vcpecommon)
vgw_vfmod_name_index= 0
diff --git a/test/vcpe/vcpecommon.py b/test/vcpe/vcpecommon.py
index 4b69fe429..4a393f53d 100755
--- a/test/vcpe/vcpecommon.py
+++ b/test/vcpe/vcpecommon.py
@@ -211,6 +211,11 @@ class VcpeCommon:
self.policy_api_service_name = 'policy-api'
self.policy_pap_service_name = 'policy-pap'
+ #############################################################################################
+ # MARIADB-GALERA settings
+ self.mariadb_galera_endpoint_ip = self.get_k8s_service_endpoint_info('mariadb-galera','ip')
+ self.mariadb_galera_endpoint_port = self.get_k8s_service_endpoint_info('mariadb-galera','port')
+
def heatbridge(self, openstack_stack_name, svc_instance_uuid):
"""
Add vserver information to AAI
@@ -253,6 +258,11 @@ class VcpeCommon:
assert mac_recent
return mac_recent
+ def execute_cmds_mariadb(self, cmds):
+ """Run SQL commands against the sdnctl database via the
+ mariadb-galera service endpoint discovered from k8s, using the
+ same SDNC credentials as execute_cmds_sdnc_db."""
+ self.execute_cmds_db(cmds, self.sdnc_db_user, self.sdnc_db_pass,
+ self.sdnc_db_name, self.mariadb_galera_endpoint_ip,
+ self.mariadb_galera_endpoint_port)
+
 # Run SQL commands against the SDNC database through the sdnc host's
 # exposed DB port (contrast with execute_cmds_mariadb, which goes
 # through the mariadb-galera endpoint).
 def execute_cmds_sdnc_db(self, cmds):
 self.execute_cmds_db(cmds, self.sdnc_db_user, self.sdnc_db_pass, self.sdnc_db_name,
 self.hosts['sdnc'], self.sdnc_db_port)
@@ -564,6 +574,32 @@ class VcpeCommon:
return resp.spec.cluster_ip
+ def get_k8s_service_endpoint_info(self, service, subset):
+ """
+ Returns endpoint data for a given service and subset. If there
+ is more than one endpoint returns data for the first one from
+ the list that API returned.
+ :param service: name of the service
+ :param subset: subset name, one of "ip","port"
+ :return: endpoint ip (subset "ip") or port (subset "port");
+ logs an error and returns None for any other subset value
+ """
+ config.load_kube_config()
+ api = client.CoreV1Api()
+ # Raise the kubernetes client logger to INFO to silence its
+ # per-request DEBUG output.
+ kslogger = logging.getLogger('kubernetes')
+ kslogger.setLevel(logging.INFO)
+ try:
+ resp = api.read_namespaced_endpoints(service, self.onap_namespace)
+ except client.rest.ApiException as e:
+ # A failed API call aborts the whole run.
+ self.logger.error('Error while making k8s API request: ' + e.body)
+ sys.exit()
+
+ if subset == "ip":
+ return resp.subsets[0].addresses[0].ip
+ elif subset == "port":
+ return resp.subsets[0].ports[0].port
+ else:
+ # Falls through returning None after logging.
+ self.logger.error("Unsupported subset type")
+
+
def extract_vm_ip_as_dict(self, novalist_results, net_addr, net_addr_len):
vm_ip_dict = {}
for line in novalist_results.split('\n'):