Diffstat (limited to 'robotframework-onap')
-rw-r--r--  robotframework-onap/.gitignore | 262
-rw-r--r--  robotframework-onap/README.TXT | 6
-rw-r--r--  robotframework-onap/eteutils/DNSUtils.py | 17
-rw-r--r--  robotframework-onap/eteutils/EteGatherDataListener.py | 126
-rw-r--r--  robotframework-onap/eteutils/HEATUtils.py | 87
-rw-r--r--  robotframework-onap/eteutils/HTTPUtils.py | 18
-rw-r--r--  robotframework-onap/eteutils/JSONUtils.py | 41
-rw-r--r--  robotframework-onap/eteutils/OSUtils.py | 14
-rw-r--r--  robotframework-onap/eteutils/OpenstackLibrary.py | 124
-rw-r--r--  robotframework-onap/eteutils/RequestsClientCert.py | 7
-rw-r--r--  robotframework-onap/eteutils/StringTemplater.py | 9
-rw-r--r--  robotframework-onap/eteutils/TemplatingEngine.py | 34
-rw-r--r--  robotframework-onap/eteutils/UUID.py | 15
-rw-r--r--  robotframework-onap/eteutils/__init__.py | 0
-rw-r--r--  robotframework-onap/eteutils/csvLibrary.py | 16
-rw-r--r--  robotframework-onap/loadtest/RunEte.py | 39
-rw-r--r--  robotframework-onap/loadtest/TestConfig.py | 59
-rw-r--r--  robotframework-onap/loadtest/TestController.py | 80
-rw-r--r--  robotframework-onap/loadtest/TestMain.py | 93
-rw-r--r--  robotframework-onap/loadtest/__init__.py | 0
-rw-r--r--  robotframework-onap/pom.xml | 85
-rw-r--r--  robotframework-onap/setup-template.py | 48
-rw-r--r--  robotframework-onap/setup.cfg | 5
-rw-r--r--  robotframework-onap/setup.py | 48
-rw-r--r--  robotframework-onap/tox.ini | 12
-rwxr-xr-x  robotframework-onap/vcpeutils/SoUtils.py | 371
-rw-r--r--  robotframework-onap/vcpeutils/__init__.py | 0
-rwxr-xr-x  robotframework-onap/vcpeutils/csar_parser.py | 231
-rwxr-xr-x  robotframework-onap/vcpeutils/preload.py | 231
-rwxr-xr-x  robotframework-onap/vcpeutils/vcpecommon.py | 325
30 files changed, 2403 insertions, 0 deletions
diff --git a/robotframework-onap/.gitignore b/robotframework-onap/.gitignore
new file mode 100644
index 0000000..70a57dd
--- /dev/null
+++ b/robotframework-onap/.gitignore
@@ -0,0 +1,262 @@
+.tox/*
+python_openecomp_eteutils.egg-info/*
+
+# Created by https://www.gitignore.io/api/node,sonar,maven,eclipse,sonarqube,intellij+all
+
+### Eclipse ###
+
+.metadata
+bin/
+tmp/
+*.tmp
+*.bak
+*.swp
+*~.nib
+local.properties
+.settings/
+.loadpath
+.recommenders
+
+# External tool builders
+.externalToolBuilders/
+
+# Locally stored "Eclipse launch configurations"
+*.launch
+
+# PyDev specific (Python IDE for Eclipse)
+*.pydevproject
+
+# CDT-specific (C/C++ Development Tooling)
+.cproject
+
+# CDT- autotools
+.autotools
+
+# Java annotation processor (APT)
+.factorypath
+
+# PDT-specific (PHP Development Tools)
+.buildpath
+
+# sbteclipse plugin
+.target
+
+# Tern plugin
+.tern-project
+
+# TeXlipse plugin
+.texlipse
+
+# STS (Spring Tool Suite)
+.springBeans
+
+# Code Recommenders
+.recommenders/
+
+# Annotation Processing
+.apt_generated/
+
+# Scala IDE specific (Scala & Java development for Eclipse)
+.cache-main
+.scala_dependencies
+.worksheet
+
+### Eclipse Patch ###
+# Eclipse Core
+.project
+
+# JDT-specific (Eclipse Java Development Tools)
+.classpath
+
+# Annotation Processing
+.apt_generated
+
+.sts4-cache/
+
+### Intellij+all ###
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+# Android studio 3.1+ serialized cache file
+.idea/caches/build_file_checksums.ser
+
+### Intellij+all Patch ###
+# Ignores the whole .idea folder and all .iml files
+# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
+
+.idea/
+
+# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
+
+*.iml
+modules.xml
+.idea/misc.xml
+*.ipr
+
+### Maven ###
+target/
+pom.xml.tag
+pom.xml.releaseBackup
+pom.xml.versionsBackup
+pom.xml.next
+release.properties
+dependency-reduced-pom.xml
+buildNumber.properties
+.mvn/timing.properties
+.mvn/wrapper/maven-wrapper.jar
+
+### Node ###
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+bower_components
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules/
+jspm_packages/
+
+# TypeScript v1 declaration files
+typings/
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variables file
+.env
+
+# parcel-bundler cache (https://parceljs.org/)
+.cache
+
+# next.js build output
+.next
+
+# nuxt.js build output
+.nuxt
+
+# vuepress build output
+.vuepress/dist
+
+# Serverless directories
+.serverless
+
+### Sonar ###
+#Sonar generated dir
+/.sonar/
+
+### SonarQube ###
+# SonarQube ignore files.
+#
+# https://docs.sonarqube.org/display/SCAN/Analyzing+with+SonarQube+Scanner
+# Sonar Scanner working directories
+.sonar/
+.scannerwork/
+
+# http://www.sonarlint.org/commandline/
+# SonarLint working directories, configuration files (including credentials)
+.sonarlint/
+
+
+# End of https://www.gitignore.io/api/node,sonar,maven,eclipse,sonarqube,intellij+all
+.flattened-pom.xml \ No newline at end of file
diff --git a/robotframework-onap/README.TXT b/robotframework-onap/README.TXT
new file mode 100644
index 0000000..9989546
--- /dev/null
+++ b/robotframework-onap/README.TXT
@@ -0,0 +1,6 @@
+Robotframework-ONAP
+=======================
+
+Robot Framework plugin to work with ONAP
+
+To install locally, check out this repo and then run 'pip install -e .' in the repository root \ No newline at end of file
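
A quick smoke test of the editable install, run under the Python 2.7 interpreter the package targets (a minimal sketch; the modules used here are the ones added later in this change):

from eteutils.UUID import UUID
from eteutils.JSONUtils import JSONUtils

# Generate a random UUID the same way the Robot keyword does
print(UUID().generate_UUID())

# Compare two JSON strings ignoring key order (requires the deepdiff dependency)
print(JSONUtils().json_equals('{"a": 1, "b": 2}', '{"b": 2, "a": 1}'))  # True
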
diff --git a/robotframework-onap/eteutils/DNSUtils.py b/robotframework-onap/eteutils/DNSUtils.py
new file mode 100644
index 0000000..65ae68b
--- /dev/null
+++ b/robotframework-onap/eteutils/DNSUtils.py
@@ -0,0 +1,17 @@
+import dns.message
+import dns.name
+import dns.query, dns.rdatatype, dns.flags
+
+class DNSUtils:
+ """ Utilities useful for DNS requests """
+
+ def dns_request(self, domain, ns):
+ """ return the ip address of the given domain name from the given nameserver """
+ request = dns.message.make_query(domain, dns.rdatatype.A);
+ request.flags |= dns.flags.AD;
+ request.find_rrset(request.additional, dns.name.root, 65535, dns.rdatatype.OPT, create=True, force_unique=True)
+ response = dns.query.udp(request, ns)
+
+ for answer in response.answer:
+ for item in answer.items:
+ return item \ No newline at end of file
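
A minimal usage sketch of the keyword from plain Python (the domain and nameserver below are placeholders; requires the dnspython dependency declared in setup.py):

from eteutils.DNSUtils import DNSUtils

# Ask the placeholder nameserver 8.8.8.8 for the A record of example.com
record = DNSUtils().dns_request('example.com', '8.8.8.8')
print(record)  # first A record in the answer, or None if nothing was resolved
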
diff --git a/robotframework-onap/eteutils/EteGatherDataListener.py b/robotframework-onap/eteutils/EteGatherDataListener.py
new file mode 100644
index 0000000..79c02b4
--- /dev/null
+++ b/robotframework-onap/eteutils/EteGatherDataListener.py
@@ -0,0 +1,126 @@
+import os.path
+import paramiko
+import logging
+from sys import stderr
+
+"""
+EteGatherDataListener implements the ROBOT listener API version 2 and is
+instantiated via the robot command line option
+
+ --listener EteGatherDataListener:<jobnumber>:<key filename>
+
+The purpose is to gather and preserve debugging data from each of the application
+VMs when an ETE test fails.
+
+This listener counts the number of test
+cases that have failed and, if > 0 at the end of the robot execution (close()),
+will connect to each application VM and
+
+1. upload the gather_data.sh
+2. execute gather_data.sh
+3. transfer the resulting zip file to the Robot reports folder
+
+This will enable the Jenkins job to retrieve the debug data along with the
+Robot logs and reports and archive it with the failed job for later retrieval.
+
+Note that the gather_data.sh depends upon the application providing
+a /opt/gather_application_data.sh on their respective VMs for the zip file
+to be created.
+"""
+
+
+class EteGatherDataListener(object):
+ ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
+ ROBOT_LISTENER_API_VERSION = 2
+
+ APPLICATIONS = {
+ "aai" : "10.0.1.1",
+ "appc" : "10.0.2.1",
+ "sdc" : "10.0.3.1",
+ "dcae" : "10.0.4.1",
+ "mso" : "10.0.5.1",
+ "policy" : "10.0.6.1",
+ "sdnc" : "10.0.7.1",
+ "vid" : "10.0.8.1",
+ "portal" : "10.0.9.1",
+ "message_router" : "10.0.11.1",
+ "dcae_pstg00" : "10.0.4.101",
+ "dcae_coll00" : "10.0.4.102",
+ "dcae_cdap00" : "10.0.4.103",
+ "dcae_cdap01" : "10.0.4.104",
+ "dcae_cdap02" : "10.0.4.105"
+ }
+
+ keyfile = ""
+ local_gather_data_sh = ""
+
+ def __init__(self, job='10', keyfile='/share/config/key.pvt', shell="gather_data.sh"):
+ self.tests_passed = 0
+ self.tests_failed = 0
+ self.output_folder = ''
+ self.job = job
+ self.folder= ''
+ self.keyfile = keyfile
+ self.local_gather_data_sh = shell
+ print "EteGatherDataListener instantiated"
+
+ def end_test(self, name, attrs):
+ if attrs['status'] == 'PASS':
+ self.tests_passed+=1
+ else:
+ self.tests_failed+=1
+
+ def output_file(self, path):
+ if (self.folder != ''):
+ return
+ self.folder = os.path.dirname(path)
+ print(self.folder)
+
+ def close(self):
+ print "EteGatherDataListener tests failed=" + str(self.tests_failed)
+ if (self.tests_failed > 0):
+ self.gather_debug_data()
+
+ def gather_debug_data(self):
+
+ for application in self.APPLICATIONS.keys():
+ self.gather_application_data(application, self.APPLICATIONS.get(application))
+
+ def gather_application_data(self, application, ip):
+ extra = {"_threadid" : 1}
+ paramiko.util.log_to_file(self.folder + "/paramiko.log", level=0)
+ log = logging.getLogger("paramiko")
+ ssh = paramiko.SSHClient()
+ try:
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ ssh.connect(ip,username="root", key_filename=self.keyfile)
+ except paramiko.SSHException:
+ log.error("Connection Failed to " + ip, extra=extra)
+ return
+ try:
+ gather_data_sh = "/tmp/gather_data.sh"
+ ftp = ssh.open_sftp()
+ ftp.put(self.local_gather_data_sh, gather_data_sh)
+ ftp.close()
+ stdin, stdout, stderr = ssh.exec_command("/bin/bash "+ gather_data_sh + " " + application + " " + self.job)
+ error = stderr.read()
+ if (error != ''):
+ log.info("stderr:" + error, extra=extra)
+ ssh.close()
+ return;
+ # No error? Assume we have a file to download.
+ out = stdout.read()
+ log.info("stdout:" + out, extra=extra)
+ filename = application + "_" + self.job + ".tar.gz"
+ localzip = self.folder + "/" + filename
+ remotezip = "/tmp/gather_data/" + filename
+ ftp = ssh.open_sftp()
+ ftp.get(remotezip, localzip)
+ ftp.close()
+ stdin, stdout, stderr = ssh.exec_command("rm -rf " + remotezip);
+ ssh.close()
+ except paramiko.SSHException:
+ ssh.close()
+ return
+
+
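
The listener is normally attached on the robot command line as shown in the module docstring; the equivalent call through Robot Framework's Python entry point looks roughly like this (job number, key path and suite name are made up, and the listener module is assumed to be importable from PYTHONPATH):

import robot

# Same as: robot --listener EteGatherDataListener:42:/share/config/key.pvt ete_suite.robot
robot.run('ete_suite.robot',
          listener='EteGatherDataListener:42:/share/config/key.pvt')
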
diff --git a/robotframework-onap/eteutils/HEATUtils.py b/robotframework-onap/eteutils/HEATUtils.py
new file mode 100644
index 0000000..15c5689
--- /dev/null
+++ b/robotframework-onap/eteutils/HEATUtils.py
@@ -0,0 +1,87 @@
+import json
+import yaml
+import StringIO
+import copy
+from hashlib import md5
+from paramiko import RSAKey
+from paramiko.ssh_exception import PasswordRequiredException
+
+class HEATUtils:
+ """ Utilities useful for constructing OpenStack HEAT requests """
+
+ def get_yaml(self, template_file):
+ """Get Yaml reads a YAML Heat template file and returns the parsed YAML object"""
+ if isinstance(template_file, basestring):
+ fin = open(template_file, 'r')
+ yamlobj = yaml.load(fin)
+ return yamlobj
+ return None
+
+ def template_yaml_to_json(self, template_file):
+ """Template Yaml To Json reads a YAML Heat template file and returns a JSON string that can be included in an Openstack Add Stack request"""
+ if isinstance(template_file, basestring):
+ fin = open(template_file, 'r')
+ yamlobj = yaml.load(fin)
+ fin.close()
+ if 'heat_template_version' in yamlobj:
+ datetime = yamlobj['heat_template_version']
+ yamlobj['heat_template_version'] = str(datetime)
+ fout = StringIO.StringIO()
+ json.dump(yamlobj, fout)
+ contents = fout.getvalue()
+ fout.close()
+ return contents
+
+ def env_yaml_to_json(self, template_file):
+ """Env Yaml To Json reads a YAML Heat env file and returns a JSON string that can be included in an Openstack Add Stack request"""
+ if isinstance(template_file, basestring):
+ fin = open(template_file, 'r')
+ yamlobj = yaml.load(fin)
+ fin.close()
+ if 'parameters' in yamlobj:
+ fout = StringIO.StringIO()
+ json.dump(yamlobj['parameters'], fout)
+ contents = fout.getvalue()
+ fout.close()
+ return contents
+ return None
+
+ def stack_info_parse(self, stack_info):
+ """ returns a flattened version of the Openstack Find Stack results """
+ d = {}
+ if isinstance(stack_info, dict):
+ s = stack_info['stack']
+ p = s['parameters']
+ d = copy.deepcopy(p)
+ d['id'] = s['id']
+ d['name'] = s['stack_name']
+ d['stack_status'] = s['stack_status']
+ return d
+
+
+ def match_fingerprint(self, pvt_file, pw, fingerprint):
+ try:
+ sshKey = RSAKey.from_private_key_file(pvt_file, pw)
+ keybytes = md5(sshKey.asbytes()).hexdigest()
+ printableFingerprint = ':'.join(a+b for a,b in zip(keybytes[::2], keybytes[1::2]))
+ return printableFingerprint == fingerprint.__str__()
+ except PasswordRequiredException:
+ return False
+
+ def match_private_key_file_to_keypair(self, files, keypair):
+ for keyfile in files:
+ if (self.match_fingerprint(keyfile, None, keypair['keypair']['fingerprint'])):
+ return keyfile
+ return None
+
+ def get_openstack_server_ip(self, server, network_name="public", ipversion=4):
+ ipaddr = None
+ try:
+ versions = server['addresses'][network_name]
+ for version in versions:
+ if version['version'] == ipversion:
+ ipaddr = version['addr']
+ break;
+ except ValueError:
+ return ipaddr
+ return ipaddr \ No newline at end of file
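
A usage sketch for the template helpers, converting a Heat template and its environment into the JSON fragments an OpenStack Add Stack request expects (the file names are hypothetical):

from eteutils.HEATUtils import HEATUtils

heat = HEATUtils()
template_json = heat.template_yaml_to_json('base_vfw.yaml')  # hypothetical Heat template
env_json = heat.env_yaml_to_json('base_vfw.env')             # hypothetical Heat environment file
print(template_json)
print(env_json)
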
diff --git a/robotframework-onap/eteutils/HTTPUtils.py b/robotframework-onap/eteutils/HTTPUtils.py
new file mode 100644
index 0000000..9324af7
--- /dev/null
+++ b/robotframework-onap/eteutils/HTTPUtils.py
@@ -0,0 +1,18 @@
+import urllib
+import urllib3
+import urlparse
+
+class HTTPUtils:
+ """HTTPUtils is common resource for simple http helper keywords."""
+
+ def url_encode_string(self, barestring):
+ """URL Encode String takes in a string and converts into 'percent-encoded' string"""
+ return urllib.quote_plus(barestring)
+
+ def disable_warnings(self):
+ """ Disable the cert warnings when creating sessions for A&AI API Calls """
+ urllib3.disable_warnings()
+
+ def url_parse(self, url):
+ """ Get pieces of the URL """
+ return urlparse.urlparse(url) \ No newline at end of file
diff --git a/robotframework-onap/eteutils/JSONUtils.py b/robotframework-onap/eteutils/JSONUtils.py
new file mode 100644
index 0000000..de5da6b
--- /dev/null
+++ b/robotframework-onap/eteutils/JSONUtils.py
@@ -0,0 +1,41 @@
+import json
+
+from deepdiff import DeepDiff
+
+class JSONUtils:
+ """JSONUtils is common resource for simple json helper keywords."""
+
+ def json_equals(self, left, right):
+ """JSON Equals takes in two strings or json objects, converts them into json if needed and then compares them, returning if they are equal or not."""
+ if isinstance(left, basestring):
+ left_json = json.loads(left);
+ else:
+ left_json = left;
+ if isinstance(right, basestring):
+ right_json = json.loads(right);
+ else:
+ right_json = right;
+
+ ddiff = DeepDiff(left_json, right_json, ignore_order=True);
+ if ddiff == {}:
+ return True;
+ else:
+ return False;
+
+ def make_list_into_dict(self, listOfDicts, key):
+ """ Converts a list of dicts that contains a field that has a unique key into a dict of dicts """
+ d = {}
+ if isinstance(listOfDicts, list):
+ for thisDict in listOfDicts:
+ v = thisDict[key]
+ d[v] = thisDict
+ return d
+
+ def find_element_in_array(self, searchedArray, key, value):
+ """ Takes in an array and a key value, it will return the items in the array that has a key and value that matches what you pass in """
+ elements = [];
+ for item in searchedArray:
+ if key in item:
+ if item[key] == value:
+ elements.append(item);
+ return elements; \ No newline at end of file
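
A short sketch of the three keywords on toy data:

from eteutils.JSONUtils import JSONUtils

ju = JSONUtils()
print(ju.json_equals('{"a": [1, 2]}', '{"a": [2, 1]}'))  # True, order is ignored
vms = [{'name': 'vnf1', 'status': 'ACTIVE'}, {'name': 'vnf2', 'status': 'ERROR'}]
print(ju.make_list_into_dict(vms, 'name'))               # dict of dicts keyed by 'name'
print(ju.find_element_in_array(vms, 'status', 'ERROR'))  # [{'name': 'vnf2', ...}]
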
diff --git a/robotframework-onap/eteutils/OSUtils.py b/robotframework-onap/eteutils/OSUtils.py
new file mode 100644
index 0000000..78968f0
--- /dev/null
+++ b/robotframework-onap/eteutils/OSUtils.py
@@ -0,0 +1,14 @@
+from sys import platform
+
+class OSUtils:
+ """ Utilities useful for constructing OpenStack HEAT requests """
+
+ def get_normalized_os(self):
+ os = platform
+ if platform == "linux" or platform == "linux2":
+ os = 'linux64'
+ elif platform == "darwin":
+ os = 'mac64'
+ elif platform == "win32":
+ os = platform
+ return os
diff --git a/robotframework-onap/eteutils/OpenstackLibrary.py b/robotframework-onap/eteutils/OpenstackLibrary.py
new file mode 100644
index 0000000..adb12db
--- /dev/null
+++ b/robotframework-onap/eteutils/OpenstackLibrary.py
@@ -0,0 +1,124 @@
+from robot.libraries.BuiltIn import BuiltIn
+import robot.utils
+import json
+
+class OpenstackLibrary:
+ """OpenstackLibrary manages the connection state and service catalog of an openstack instance."""
+
+ ROBOT_LIBRARY_SCOPE = 'Global'
+
+
+ def __init__(self):
+ self._cache = robot.utils.ConnectionCache('No connections created')
+ self.builtin = BuiltIn()
+
+ def save_openstack_auth(self, alias, response,token, version='v2.0'):
+ """Save Openstack Auth takes in an openstack auth response and saves it to allow easy retrieval of the token and service catalog"""
+ self.builtin.log('Creating connection: %s' % alias, 'DEBUG')
+ jsonResponse = json.loads(response);
+ jsonResponse['auth_token'] = token
+ jsonResponse['keystone_api_version'] = version
+ self._cache.register(jsonResponse, alias=alias)
+
+ def get_openstack_token(self, alias):
+ """Get Openstack auth token from the current alias"""
+ response = self._cache.switch(alias)
+ if isinstance(response, basestring):
+ jsonResponse = json.loads(response);
+ else:
+ jsonResponse = response;
+ if jsonResponse['keystone_api_version'] == 'v2.0':
+ return jsonResponse['access']['token']['id']
+ else:
+ return jsonResponse['auth_token']
+
+ def get_openstack_catalog(self, alias):
+ """Get Openstack service catalog from the current alias"""
+ response = self._cache.switch(alias)
+ if isinstance(response, basestring):
+ jsonResponse = json.loads(response);
+ else:
+ jsonResponse = response;
+ if jsonResponse['keystone_api_version'] == 'v2.0':
+ return jsonResponse['access']['serviceCatalog']
+ else:
+ return jsonResponse['token']['catalog']
+
+
+ def get_current_openstack_tenant(self, alias):
+ """Get Openstack tenant from the current alias"""
+ response = self._cache.switch(alias)
+ if isinstance(response, basestring):
+ jsonResponse = json.loads(response);
+ else:
+ jsonResponse = response;
+ if jsonResponse['keystone_api_version'] == 'v2.0':
+ return jsonResponse['access']['token']['tenant']
+ else:
+ return jsonResponse['token']['project']
+
+ def get_current_openstack_tenant_id(self, alias):
+ """Get Openstack tenant id from the current alias"""
+ tenant = self.get_current_openstack_tenant(alias);
+ return tenant['id']
+
+ def get_openstack_regions(self, alias):
+ """Get all Openstack regions from the current alias"""
+ response = self._cache.switch(alias)
+ if isinstance(response, basestring):
+ jsonResponse = json.loads(response);
+ else:
+ jsonResponse = response;
+ regions = [];
+ if jsonResponse['keystone_api_version'] == 'v2.0':
+ resp = jsonResponse['access']['serviceCatalog']
+ else:
+ resp = jsonResponse['token']['catalog']
+ for catalogEntry in resp:
+ listOfEndpoints = catalogEntry['endpoints'];
+ for endpoint in listOfEndpoints:
+ if 'region'in endpoint:
+ if endpoint['region'] not in regions:
+ regions.append(endpoint['region'])
+ return regions;
+
+ def get_openstack_service_url(self, alias, servicetype, region = None, tenant_id = None):
+ """Get Openstack service catalog from the current alias"""
+ response = self._cache.switch(alias)
+ if isinstance(response, basestring):
+ jsonResponse = json.loads(response);
+ else:
+ jsonResponse = response;
+ endPoint = None;
+ if jsonResponse['keystone_api_version'] == 'v2.0':
+ resp = jsonResponse['access']['serviceCatalog']
+ else:
+ resp = jsonResponse['token']['catalog']
+ for catalogEntry in resp:
+ if self.__determine_match(catalogEntry['type'], servicetype):
+ listOfEndpoints = catalogEntry['endpoints'];
+ # filter out non matching regions if provided
+ listOfEndpoints[:] = [x for x in listOfEndpoints if self.__determine_match(x['region'], region)];
+ # filter out non matching tenants if provided
+ # Only provide tenant id when authorizing without qualifying with tenant id
+ # WindRiver does not return the tenantId on the endpoint in this case.
+ if tenant_id is not None:
+ listOfEndpoints[:] = [y for y in listOfEndpoints if self.__determine_match(y['tenantId'], tenant_id)];
+ if jsonResponse['keystone_api_version'] == 'v3':
+ listOfEndpoints[:] = [z for z in listOfEndpoints if self.__determine_match(z['interface'], 'public')];
+ if len(listOfEndpoints) > 0:
+ if jsonResponse['keystone_api_version'] == 'v2.0':
+ endPoint = listOfEndpoints[0]['publicURL'];
+ else:
+ endPoint = listOfEndpoints[0]['url'];
+ if endPoint == None:
+ self.builtin.should_not_be_empty("", "Service Endpoint Url should not be empty")
+ return endPoint;
+
+ def __determine_match(self, listItem, item):
+ if item is None:
+ return True;
+ elif listItem == item:
+ return True;
+ else:
+ return False; \ No newline at end of file
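
In a suite the Keystone response comes from a RequestsLibrary call; the sketch below replays the same keyword sequence from plain Python with a stripped-down, entirely made-up v2.0 auth response:

import json
from eteutils.OpenstackLibrary import OpenstackLibrary

# Minimal Keystone v2.0 auth response; every value here is made up
auth_response = json.dumps({
    "access": {
        "token": {"id": "abc123", "tenant": {"id": "tenant-1", "name": "demo"}},
        "serviceCatalog": [{
            "type": "orchestration",
            "endpoints": [{"region": "RegionOne",
                           "tenantId": "tenant-1",
                           "publicURL": "http://heat.example.com:8004/v1/tenant-1"}]
        }]
    }
})

lib = OpenstackLibrary()
lib.save_openstack_auth('my_cloud', auth_response, 'abc123')  # keystone_api_version defaults to v2.0
print(lib.get_openstack_token('my_cloud'))                    # abc123
print(lib.get_openstack_regions('my_cloud'))                  # ['RegionOne']
print(lib.get_openstack_service_url('my_cloud', 'orchestration',
                                    region='RegionOne', tenant_id='tenant-1'))
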
diff --git a/robotframework-onap/eteutils/RequestsClientCert.py b/robotframework-onap/eteutils/RequestsClientCert.py
new file mode 100644
index 0000000..e1fd66f
--- /dev/null
+++ b/robotframework-onap/eteutils/RequestsClientCert.py
@@ -0,0 +1,7 @@
+
+class RequestsClientCert:
+ """RequestsClientCert allows adding a client cert to the Requests Robot Library."""
+
+ def add_client_cert(self, session, cert):
+ """Add Client Cert takes in a requests session object and a string path to the cert"""
+ session.cert = cert \ No newline at end of file
diff --git a/robotframework-onap/eteutils/StringTemplater.py b/robotframework-onap/eteutils/StringTemplater.py
new file mode 100644
index 0000000..43d107e
--- /dev/null
+++ b/robotframework-onap/eteutils/StringTemplater.py
@@ -0,0 +1,9 @@
+import json
+from string import Template
+
+class StringTemplater:
+ """StringTemplater is common resource for templating with strings."""
+
+ def template_string(self, template, values):
+ """Template String takes in a string and its values and converts it using the string.Template class"""
+ return Template(template).substitute(values) \ No newline at end of file
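
The keyword is a thin wrapper around string.Template, so placeholders use the ${name} form (the path below is made up):

from eteutils.StringTemplater import StringTemplater

url = StringTemplater().template_string('/aai/${version}/cloud-infrastructure',
                                        {'version': 'v14'})
print(url)  # /aai/v14/cloud-infrastructure
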
diff --git a/robotframework-onap/eteutils/TemplatingEngine.py b/robotframework-onap/eteutils/TemplatingEngine.py
new file mode 100644
index 0000000..0f579e7
--- /dev/null
+++ b/robotframework-onap/eteutils/TemplatingEngine.py
@@ -0,0 +1,34 @@
+# Copyright 2019 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from jinja2 import Environment, FileSystemLoader, select_autoescape
+
+
+class TemplatingEngine:
+ """TemplateImporter is common resource for templating with strings."""
+
+ jinja_env = None
+
+ def __init__(self, templates_folder):
+ self.jinja_env = Environment(
+ loader=FileSystemLoader(templates_folder),
+ autoescape=select_autoescape(['html', 'xml'])
+ )
+
+ def apply_template(self, template_location, values):
+ """returns a string that is the jinja template in template_location filled in via the dictionary in values """
+ # fetch the template from the environment and render it with the provided values
+ template = self.jinja_env.get_template(template_location)
+ return template.render(values) \ No newline at end of file
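
A rendering sketch, assuming a hypothetical ./templates/preload.jinja that references {{ vnf_name }}:

from eteutils.TemplatingEngine import TemplatingEngine

engine = TemplatingEngine('templates')           # folder holding the Jinja templates
body = engine.apply_template('preload.jinja',    # hypothetical template file
                             {'vnf_name': 'demo-vnf-01'})
print(body)
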
diff --git a/robotframework-onap/eteutils/UUID.py b/robotframework-onap/eteutils/UUID.py
new file mode 100644
index 0000000..35c26a7
--- /dev/null
+++ b/robotframework-onap/eteutils/UUID.py
@@ -0,0 +1,15 @@
+import uuid
+import time
+import datetime
+
+class UUID:
+ """UUID is a simple library that generates a uuid"""
+
+ def generate_UUID(self):
+ """generate a uuid"""
+ return uuid.uuid4()
+
+ def generate_MilliTimestamp_UUID(self):
+ """generate a millisecond timestamp uuid"""
+ then = datetime.datetime.now()
+ return int(time.mktime(then.timetuple())*1e3 + then.microsecond/1e3) \ No newline at end of file
diff --git a/robotframework-onap/eteutils/__init__.py b/robotframework-onap/eteutils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/robotframework-onap/eteutils/__init__.py
diff --git a/robotframework-onap/eteutils/csvLibrary.py b/robotframework-onap/eteutils/csvLibrary.py
new file mode 100644
index 0000000..b38b4a5
--- /dev/null
+++ b/robotframework-onap/eteutils/csvLibrary.py
@@ -0,0 +1,16 @@
+import csv
+class csvLibrary(object):
+
+ def read_csv_file(self, filename):
+ '''This creates a keyword named "Read CSV File"
+
+ This keyword takes one argument, which is a path to a .csv file. It
+ returns a list of rows, with each row being a list of the data in
+ each column.
+ '''
+ data = []
+ with open(filename, 'rb') as csvfile:
+ reader = csv.reader(csvfile)
+ for row in reader:
+ data.append(row)
+ return data
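
Usage sketch (the CSV file name is hypothetical; the file is opened in binary mode, matching the Python 2 csv module):

from eteutils.csvLibrary import csvLibrary

rows = csvLibrary().read_csv_file('customers.csv')  # hypothetical CSV file
for row in rows:
    print(row)  # each row is a list of column values
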
diff --git a/robotframework-onap/loadtest/RunEte.py b/robotframework-onap/loadtest/RunEte.py
new file mode 100644
index 0000000..5012e7d
--- /dev/null
+++ b/robotframework-onap/loadtest/RunEte.py
@@ -0,0 +1,39 @@
+'''
+Created on Apr 7, 2017
+
+@author: jf9860
+'''
+from threading import Thread
+import subprocess
+import os
+from datetime import datetime
+import logging
+
+class RunEte(Thread):
+ '''
+ classdocs
+ '''
+ robot_test = ""
+ robot_command = "runEteTag.sh"
+ soaksubfolder = ""
+ test_number =0
+
+ def __init__(self, test_name, soaksubfolder, test_number):
+ '''
+ Constructor
+ '''
+ super(RunEte, self).__init__()
+ self.robot_test = test_name
+ self.soaksubfolder = soaksubfolder
+ self.test_number = test_number
+
+ def run(self):
+ logging.info("{} ({}) started - {}".format(self.getName(), self.robot_test, str(datetime.now())))
+ try:
+ ''' Add the '/' here so that the shell doesn't require a subfolder... '''
+ env = dict(os.environ, SOAKSUBFOLDER=self.soaksubfolder + "/")
+ output = subprocess.check_output(["bash", self.robot_command, self.robot_test, self.test_number], shell=False, env=env)
+ logging.info("{} ({}) {}".format(self.getName(), self.robot_test, output))
+ except Exception, e:
+ logging.error("{} ({}) Unexpected error {}".format(self.getName(), self.robot_test, repr(e)))
+ logging.info("{} ({}) ended - {}".format(self.getName(), self.robot_test, str(datetime.now())))
diff --git a/robotframework-onap/loadtest/TestConfig.py b/robotframework-onap/loadtest/TestConfig.py
new file mode 100644
index 0000000..b9b8112
--- /dev/null
+++ b/robotframework-onap/loadtest/TestConfig.py
@@ -0,0 +1,59 @@
+'''
+Created on Apr 7, 2017
+
+@author: jf9860
+'''
+import json
+
+class TestConfig(object):
+ '''
+ The profile defines a cycle of tests. Each entry is defined as
+ [<seconds to wait>, [<list of ete tags to run after the wait]],
+ '''
+ profile = [
+ [0, ["health"]],
+ ]
+
+ duration=10
+ cyclelength=60
+
+ def __init__(self, duration=None, cyclelength=None, json=None):
+ '''
+ Constructor
+ '''
+ if (json != None):
+ self.parseConfig(json)
+ if (duration != None):
+ self.duration = duration
+ if (cyclelength != None):
+ self.cyclelength = cyclelength
+ running_time = 0
+ for p in self.profile:
+ secs = p[0]
+ running_time = running_time + secs
+ if (running_time < self.cyclelength):
+ last = self.cyclelength - running_time
+ self.profile.append([last, []])
+
+ def parseConfig(self, fileName):
+ with open(fileName) as data_file:
+ config = json.load(data_file)
+ self.profile = config["profile"]
+ self.cyclelength = config["cyclelength"]
+ self.duration = config["duration"]
+
+
+ def to_string(self):
+ pstring = 'Cycle length is {} seconds'.format(self.cyclelength)
+ pstring = '{}\nDuration is {} seconds'.format(pstring, self.duration)
+ running_time = 0
+ for p in self.profile:
+ secs = p[0]
+ running_time = running_time + secs
+ for ete in p[1]:
+ pstring = "{0}\n{1:08d} : {2:08d} : {3}".format(pstring, secs, running_time, ete)
+ if (len(p[1]) == 0):
+ pstring = "{0}\n{1:08d} : {2:08d} : {3}".format(pstring, secs, running_time, "")
+ return pstring
+
+
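
The JSON file read by parseConfig carries the same three fields used above; a sketch that writes a hypothetical one-hour profile and loads it back:

import json
from loadtest.TestConfig import TestConfig

# Hypothetical soak profile: run the "health" tag at the start of every
# 60-second cycle, and keep cycling for one hour.
with open('soak_profile.json', 'w') as f:
    json.dump({"duration": 3600, "cyclelength": 60, "profile": [[0, ["health"]]]}, f)

cfg = TestConfig(json='soak_profile.json')
print(cfg.to_string())
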
diff --git a/robotframework-onap/loadtest/TestController.py b/robotframework-onap/loadtest/TestController.py
new file mode 100644
index 0000000..3ba0083
--- /dev/null
+++ b/robotframework-onap/loadtest/TestController.py
@@ -0,0 +1,80 @@
+'''
+Created on Apr 7, 2017
+
+@author: jf9860
+'''
+import time
+import os
+from loadtest.RunEte import RunEte
+from loadtest.TestConfig import TestConfig
+import logging
+
+class TestController(object):
+ '''
+ classdocs
+ '''
+
+ threads = {}
+ threadid = 0
+ soaksubfolder = 'soak_' + str(os.getpid())
+ test_number = 0
+
+ def __init__(self, options):
+ '''
+ Constructor
+ '''
+ self.config = TestConfig(duration=options.duration, cyclelength=options.cyclelength, json=options.profile)
+ logging.info(self.config.to_string())
+
+ def execute(self):
+ starttime = time.time()
+ endtime = starttime + self.config.duration
+ profileindex = 0
+ currenttime = time.time()
+ logging.info("{}:{}:{}".format(starttime, endtime, currenttime))
+ while currenttime < endtime:
+ if (profileindex >= len(self.config.profile)):
+ profileindex = 0
+ profile = self.config.profile[profileindex]
+ sleeptime = profile[0]
+ currenttime = time.time()
+ if ((currenttime + sleeptime) < endtime):
+ time.sleep(sleeptime)
+ self.schedule(profile)
+ profileindex = profileindex + 1
+ currenttime = time.time()
+ else:
+ currenttime = endtime
+
+ for threadname in self.threads:
+ logging.info("TestController waiting on " + threadname)
+ t = self.threads[threadname]
+ t.join()
+ logging.info("Soak test completed")
+
+ def schedule(self, profile):
+ self.remove_completed_threads()
+ tests = profile[1]
+ for test in tests:
+ self.schedule_one(test)
+
+ def schedule_one(self, test):
+ self.test_number = self.test_number + 1
+ self.threadid = self.threadid + 1
+ threadname = "RunEte_" + str(self.threadid)
+ ''' test for max threads '''
+ t = RunEte(test, self.soaksubfolder, str(self.test_number))
+ t.setName(threadname)
+ t.start()
+ self.threads[threadname] = t
+
+
+ def remove_completed_threads(self):
+ toremove = []
+ for threadname in self.threads:
+ t = self.threads[threadname]
+ if (t.isAlive() == False):
+ toremove.append(threadname)
+ for threadname in toremove:
+ logging.info("Removing " + threadname)
+ del(self.threads[threadname]) \ No newline at end of file
diff --git a/robotframework-onap/loadtest/TestMain.py b/robotframework-onap/loadtest/TestMain.py
new file mode 100644
index 0000000..81c635f
--- /dev/null
+++ b/robotframework-onap/loadtest/TestMain.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+# encoding: utf-8
+'''
+loadtest.TestMain -- command line driver for the ETE soak/load test
+
+loadtest.TestMain parses the soak test options and starts a TestController
+
+It defines the main() entry point
+
+@author: user_name
+
+@copyright: 2017 organization_name. All rights reserved.
+
+@license: license
+
+@contact: user_email
+@deffield updated: Updated
+'''
+
+import sys
+import os
+
+from optparse import OptionParser, Values
+
+from loadtest.TestController import TestController
+
+__all__ = []
+__version__ = 0.1
+__date__ = '2017-04-07'
+__updated__ = '2017-04-07'
+
+DEBUG = 1
+TESTRUN = 0
+PROFILE = 0
+import time
+import logging
+
+def main(argv=None):
+ '''Command line options.'''
+ program_name = os.path.basename(sys.argv[0])
+ program_version = "v0.1"
+ program_build_date = "%s" % __updated__
+
+ program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)
+ #program_usage = '''usage: spam two eggs''' # optional - will be autogenerated by optparse
+ program_longdesc = '''''' # optional - give further explanation about what the program does
+ program_license = "Copyright 2017 user_name (organization_name) \
+ Licensed under the Apache License 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0"
+
+ if argv is None:
+ argv = sys.argv[1:]
+ try:
+ # setup option parser
+ parser = OptionParser(version=program_version_string, epilog=program_longdesc, description=program_license)
+ parser.add_option("-d", "--duration", dest="duration", help="duration of soak test in seconds [default: %default]", type=int)
+ parser.add_option("-l", "--logfile", dest="logfile", help="Full path soak log file name")
+ parser.add_option("-c", "--cyclelength", dest="cyclelength", help="Length of a single cycle through the config.\nMust be longer than a single iteration", type=int)
+ parser.add_option("-p", "--profile", dest="profile", help="Filename of json profile file")
+ parser.set_defaults(logfile="")
+ (opts, args) = parser.parse_args(argv)
+
+ if (opts.logfile != ""):
+ logging.basicConfig(filename=opts.logfile, level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.DEBUG)
+ controller = TestController(opts)
+ controller.execute()
+
+ except Exception, e:
+ indent = len(program_name) * " "
+ sys.stderr.write(program_name + ": " + repr(e) + "\n")
+ sys.stderr.write(indent + " for help use --help")
+ return 2
+
+
+if __name__ == "__main__":
+ if DEBUG:
+ print "debug"
+ if TESTRUN:
+ import doctest
+ doctest.testmod()
+ if PROFILE:
+ import cProfile
+ import pstats
+ profile_filename = 'loadtest.TestMain_profile.txt'
+ cProfile.run('main()', profile_filename)
+ statsfile = open("profile_stats.txt", "wb")
+ p = pstats.Stats(profile_filename, stream=statsfile)
+ stats = p.strip_dirs().sort_stats('cumulative')
+ stats.print_stats()
+ statsfile.close()
+ sys.exit(0)
+ sys.exit(main()) \ No newline at end of file
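
Because main() takes an argv list, the soak driver can also be launched from Python; all argument values below are placeholders, and the profile file must exist (see the TestConfig sketch above):

from loadtest.TestMain import main

# Same as: python TestMain.py -d 3600 -c 600 -p soak_profile.json -l soak.log
# Note: this starts a full soak run that lasts for the configured duration.
main(['-d', '3600', '-c', '600', '-p', 'soak_profile.json', '-l', 'soak.log'])
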
diff --git a/robotframework-onap/loadtest/__init__.py b/robotframework-onap/loadtest/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/robotframework-onap/loadtest/__init__.py
diff --git a/robotframework-onap/pom.xml b/robotframework-onap/pom.xml
new file mode 100644
index 0000000..8c6c05b
--- /dev/null
+++ b/robotframework-onap/pom.xml
@@ -0,0 +1,85 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <packaging>pom</packaging>
+ <groupId>org.onap.testsuite.python-testing-utils</groupId>
+ <artifactId>robotframework-onap</artifactId>
+ <name>robotframework-onap</name>
+ <version>0.4.0-SNAPSHOT</version>
+ <description>Scripts written to be used during robot framework testing</description>
+ <parent>
+ <groupId>org.onap.oparent</groupId>
+ <artifactId>oparent-python</artifactId>
+ <version>1.2.3</version>
+ <relativePath/>
+ </parent>
+ <properties>
+ <!-- this won't work because pip applies a regex to names while this doesn't https://www.python.org/dev/peps/pep-0427/#id12
+ <wheel.name>${project.artifactId}-${python_version}-py2-none-any.whl</wheel.name>-->
+ <wheel.name>robotframework_onap-${python_version}-py2-none-any.whl</wheel.name>
+ <python.sourceDirectory>${project.basedir}</python.sourceDirectory>
+ <python.pypi.repository>https://nexus3.onap.org/repository/PyPi.snapshot/</python.pypi.repository>
+ </properties>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>${project.build.directory}/maven-python/dist/${wheel.name}</file>
+ <type>whl</type>
+ </artifact>
+ </artifacts>
+ <skipAttach>false</skipAttach>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>com.github.UltimateDogg</groupId>
+ <artifactId>maven-python-distribute-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>package</id>
+ <goals>
+ <goal>package</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>process</id>
+ <goals>
+ <goal>process-sources</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>deploy</id>
+ <goals>
+ <goal>deploy</goal>
+ </goals>
+ </execution>
+ </executions>
+ <configuration>
+ <repository>${python.pypi.repository}</repository>
+ <sourceDirectory>${python.sourceDirectory}</sourceDirectory>
+ </configuration>
+ </plugin>
+ <plugin>
+ <artifactId>exec-maven-plugin</artifactId>
+ <groupId>org.codehaus.mojo</groupId>
+ <executions>
+ <execution>
+ <id>tox-test</id>
+ <configuration>
+ <workingDirectory>${project.build.directory}</workingDirectory>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/robotframework-onap/setup-template.py b/robotframework-onap/setup-template.py
new file mode 100644
index 0000000..e8d3dcf
--- /dev/null
+++ b/robotframework-onap/setup-template.py
@@ -0,0 +1,48 @@
+# Copyright 2019 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from setuptools import setup
+
+setup(
+ name='${PROJECT_NAME}', # This is the name of your PyPI-package.
+ keywords=("utils", "robotframework", "testing", "onap"),
+ version='${VERSION}', # Update the version number for new releases
+ license="Apache 2.0",
+ description='Scripts written to be used during robot framework testing', # Info about script
+ long_description="python-package that provides convenience methods to make certain tasks in robot framework easier. "
+ "Since this uses (or may in the future use) robot framework internal libraries, it is not meant as a "
+ "general purpose library",
+ url="https://github.com/onap/testsuite-python-testing-utils",
+ platforms=['all'],
+ install_requires=[
+ 'dnspython',
+ 'paramiko',
+ 'pyyaml',
+ 'robotframework',
+ 'deepdiff>=2.5,<3.3',
+ 'Jinja2'
+ ], # what we need
+ packages=['eteutils', 'loadtest', 'vcpeutils'], # The name of your scripts package
+ package_dir={'eteutils': 'eteutils', 'loadtest': 'loadtest', 'vcpeutils':'vcpeutils'}, # The location of your scripts package
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'Intended Audience :: Developers',
+ 'Programming Language :: Python :: 2.7',
+ 'Environment :: Plugins',
+ 'Framework :: Robot Framework',
+ 'Framework :: Robot Framework :: Library',
+ 'License :: OSI Approved :: Apache Software License'
+ ]
+)
diff --git a/robotframework-onap/setup.cfg b/robotframework-onap/setup.cfg
new file mode 100644
index 0000000..493416b
--- /dev/null
+++ b/robotframework-onap/setup.cfg
@@ -0,0 +1,5 @@
+[bdist_wheel]
+# This flag says that the code is written to work on both Python 2 and Python
+# 3. If at all possible, it is good practice to do this. If you cannot, you
+# will need to generate wheels for each Python version that you support.
+universal=0 \ No newline at end of file
diff --git a/robotframework-onap/setup.py b/robotframework-onap/setup.py
new file mode 100644
index 0000000..c5460de
--- /dev/null
+++ b/robotframework-onap/setup.py
@@ -0,0 +1,48 @@
+# Copyright 2019 AT&T Intellectual Property. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from setuptools import setup
+
+setup(
+ name='robotframework-onap', # This is the name of your PyPI-package.
+ keywords=("utils", "robotframework", "testing", "onap"),
+ version='0.4', # Update the version number for new releases
+ license="Apache 2.0",
+ description='Scripts written to be used during robot framework testing', # Info about script
+ long_description="python-package that provides convenience methods to make certain tasks in robot framework easier. "
+ "Since this uses (or may in the future use) robot framework internal libraries, it is not meant as a "
+ "general purpose library",
+ url="https://github.com/onap/testsuite-python-testing-utils",
+ platforms=['all'],
+ install_requires=[
+ 'dnspython',
+ 'paramiko',
+ 'pyyaml',
+ 'robotframework',
+ 'deepdiff>=2.5,<3.3',
+ 'Jinja2'
+ ], # what we need
+ packages=['eteutils', 'loadtest', 'vcpeutils'], # The name of your scripts package
+ package_dir={'eteutils': 'eteutils', 'loadtest': 'loadtest', 'vcpeutils':'vcpeutils'}, # The location of your scripts package
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'Intended Audience :: Developers',
+ 'Programming Language :: Python :: 2.7',
+ 'Environment :: Plugins',
+ 'Framework :: Robot Framework',
+ 'Framework :: Robot Framework :: Library',
+ 'License :: OSI Approved :: Apache Software License'
+ ]
+)
diff --git a/robotframework-onap/tox.ini b/robotframework-onap/tox.ini
new file mode 100644
index 0000000..42183a7
--- /dev/null
+++ b/robotframework-onap/tox.ini
@@ -0,0 +1,12 @@
+# Tox (https://tox.readthedocs.io/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = py27
+
+[testenv]
+commands = {envpython} setup.py test
+deps =
+
diff --git a/robotframework-onap/vcpeutils/SoUtils.py b/robotframework-onap/vcpeutils/SoUtils.py
new file mode 100755
index 0000000..1248613
--- /dev/null
+++ b/robotframework-onap/vcpeutils/SoUtils.py
@@ -0,0 +1,371 @@
+#! /usr/bin/python
+
+import sys
+import logging
+import requests
+import json
+from datetime import datetime
+#import progressbar
+import time
+import csar_parser
+import preload
+from vcpecommon import *
+from robot.api import logger
+
+
+class SoUtils:
+
+ def __init__(self):
+ """
+ The SO API version used for requests is fixed to 'v4'.
+ """
+ self.tmp_solution_for_so_bug = False
+ #self.logger = logging.getLogger(__name__)
+ self.logger = logger
+ self.vcpecommon = VcpeCommon()
+ self.api_version = 'v4'
+ self.service_req_api_url = self.vcpecommon.so_req_api_url[self.api_version]
+
+
+ def submit_create_req(self, req_json, req_type, service_instance_id=None, vnf_instance_id=None):
+ """
+ POST {serverRoot}/serviceInstances/v4
+ POST {serverRoot}/serviceInstances/v4/{serviceInstanceId}/vnfs
+ POST {serverRoot}/serviceInstances/v4/{serviceInstanceId}/networks
+ POST {serverRoot}/serviceInstances/v4/{serviceInstanceId}/vnfs/{vnfInstanceId}/vfModules
+ :param req_json:
+ :param service_instance_id: this is required only for networks, vnfs, and vf modules
+ :param req_type:
+ :param vnf_instance_id:
+ :return: req_id, instance_id
+ """
+ if req_type == 'service':
+ url = self.service_req_api_url
+ elif req_type == 'vnf':
+ url = '/'.join([self.service_req_api_url, service_instance_id, 'vnfs'])
+ elif req_type == 'network':
+ url = '/'.join([self.service_req_api_url, service_instance_id, 'networks'])
+ elif req_type == 'vfmodule':
+ url = '/'.join([self.service_req_api_url, service_instance_id, 'vnfs', vnf_instance_id, 'vfModules'])
+ else:
+ self.logger.error('Invalid request type: {0}. Can only be service/vnf/network/vfmodule'.format(req_type))
+ return None, None
+
+ self.logger.info(url)
+ r = requests.post(url, headers=self.vcpecommon.so_headers, auth=self.vcpecommon.so_userpass, json=req_json)
+ self.logger.debug(r)
+ response = r.json()
+
+ self.logger.debug('---------------------------------------------------------------')
+ self.logger.debug('------- Creation request submitted to SO, got response --------')
+ self.logger.debug(json.dumps(response, indent=4, sort_keys=True))
+ self.logger.debug('---------------------------------------------------------------')
+ req_id = response.get('requestReferences', {}).get('requestId', '')
+ instance_id = response.get('requestReferences', {}).get('instanceId', '')
+
+ return req_id, instance_id
+
+ def check_progress(self, req_id, eta=0, interval=5):
+ if not req_id:
+ self.logger.error('Error when checking SO request progress, invalid request ID: ' + req_id)
+ return False
+ duration = 0.0
+ #bar = progressbar.ProgressBar(redirect_stdout=True)
+ url = self.vcpecommon.so_check_progress_api_url + '/' + req_id
+
+ while True:
+ time.sleep(interval)
+ r = requests.get(url, headers=self.vcpecommon.so_headers, auth=self.vcpecommon.so_userpass)
+ response = r.json()
+
+ duration += interval
+ if eta > 0:
+ percentage = min(95, 100 * duration / eta)
+ else:
+ percentage = int(response['request']['requestStatus']['percentProgress'])
+
+ if response['request']['requestStatus']['requestState'] == 'IN_PROGRESS':
+ self.logger.debug('------------------Request Status-------------------------------')
+ self.logger.debug(json.dumps(response, indent=4, sort_keys=True))
+ #bar.update(percentage)
+ else:
+ self.logger.debug('---------------------------------------------------------------')
+ self.logger.debug('----------------- Creation Request Results --------------------')
+ self.logger.debug(json.dumps(response, indent=4, sort_keys=True))
+ self.logger.debug('---------------------------------------------------------------')
+ flag = response['request']['requestStatus']['requestState'] == 'COMPLETE'
+ if not flag:
+ self.logger.error('Request failed.')
+ self.logger.error(json.dumps(response, indent=4, sort_keys=True))
+ #bar.update(100)
+ #bar.finish()
+ return flag
+
+ def add_req_info(self, req_details, instance_name, product_family_id=None):
+ req_details['requestInfo'] = {
+ 'instanceName': instance_name,
+ 'source': 'VID',
+ 'suppressRollback': 'true',
+ 'requestorId': 'vCPE-Robot'
+ }
+ if product_family_id:
+ req_details['requestInfo']['productFamilyId'] = product_family_id
+
+ def add_related_instance(self, req_details, instance_id, instance_model):
+ instance = {"instanceId": instance_id, "modelInfo": instance_model}
+ if 'relatedInstanceList' not in req_details:
+ req_details['relatedInstanceList'] = [{"relatedInstance": instance}]
+ else:
+ req_details['relatedInstanceList'].append({"relatedInstance": instance})
+
+ def generate_vnf_or_network_request(self, req_type, instance_name, vnf_or_network_model, service_instance_id,
+ service_model):
+ req_details = {
+ 'modelInfo': vnf_or_network_model,
+ #'cloudConfiguration': {"lcpCloudRegionId": self.vcpecommon.os_region_name,
+ # "tenantId": self.vcpecommon.os_tenant_id},
+ 'cloudConfiguration': {"lcpCloudRegionId": self.region_name,
+ "tenantId": self.tenant_id},
+ 'requestParameters': {"userParams": []},
+ 'platform': {"platformName": "Platform-Demonstration"}
+ }
+ self.add_req_info(req_details, instance_name, self.vcpecommon.product_family_id)
+ self.add_related_instance(req_details, service_instance_id, service_model)
+ return {'requestDetails': req_details}
+
+ def generate_vfmodule_request(self, instance_name, vfmodule_model, service_instance_id,
+ service_model, vnf_instance_id, vnf_model):
+ req_details = {
+ 'modelInfo': vfmodule_model,
+ #'cloudConfiguration': {"lcpCloudRegionId": self.vcpecommon.os_region_name,
+ # "tenantId": self.vcpecommon.os_tenant_id},
+ 'cloudConfiguration': {"lcpCloudRegionId": self.region_name,
+ "tenantId": self.tenant_id},
+ 'requestParameters': {"usePreload": 'true'}
+ }
+ self.add_req_info(req_details, instance_name, self.vcpecommon.product_family_id)
+ self.add_related_instance(req_details, service_instance_id, service_model)
+ self.add_related_instance(req_details, vnf_instance_id, vnf_model)
+ return {'requestDetails': req_details}
+
+ def generate_service_request(self, instance_name, model):
+ req_details = {
+ 'modelInfo': model,
+ 'subscriberInfo': {'globalSubscriberId': self.vcpecommon.global_subscriber_id},
+ 'requestParameters': {
+ "userParams": [],
+ "subscriptionServiceType": "vCPE",
+ "aLaCarte": 'true'
+ }
+ }
+ self.add_req_info(req_details, instance_name)
+ self.add_project_info(req_details)
+ self.add_owning_entity(req_details)
+ return {'requestDetails': req_details}
+
+ def add_project_info(self, req_details):
+ req_details['project'] = {'projectName': self.vcpecommon.project_name}
+
+ def add_owning_entity(self, req_details):
+ req_details['owningEntity'] = {'owningEntityId': self.vcpecommon.owning_entity_id,
+ 'owningEntityName': self.vcpecommon.owning_entity_name}
+
+ def generate_custom_service_request(self, instance_name, model, brg_mac):
+ brg_mac_enc = brg_mac.replace(':', '-')
+ req_details = {
+ 'modelInfo': model,
+ 'subscriberInfo': {'subscriberName': 'Kaneohe',
+ 'globalSubscriberId': self.vcpecommon.global_subscriber_id},
+ 'cloudConfiguration': {"lcpCloudRegionId": self.region_name,
+ "tenantId": self.tenant_id},
+ 'requestParameters': {
+ "userParams": [
+ {
+ 'name': 'BRG_WAN_MAC_Address',
+ 'value': brg_mac
+ },
+ {
+ 'name': 'VfModuleNames',
+ 'value': [
+ {
+ 'VfModuleModelInvariantUuid': self.vcpecommon.vgw_VfModuleModelInvariantUuid,
+ 'VfModuleName': 'VGW2BRG-{0}'.format(brg_mac_enc)
+ }
+ ]
+ },
+ {
+ "name": "Customer_Location",
+ "value": self.vcpecommon.customer_location_used_by_oof
+ },
+ {
+ "name": "Homing_Solution",
+ "value": self.vcpecommon.homing_solution
+ }
+ ],
+ "subscriptionServiceType": "vCPE",
+ 'aLaCarte': 'false'
+ }
+ }
+ self.add_req_info(req_details, instance_name, self.vcpecommon.custom_product_family_id)
+ self.add_project_info(req_details)
+ self.add_owning_entity(req_details)
+ return {'requestDetails': req_details}
+
+ def create_custom_service(self, csar_file, brg_mac, name_suffix=None):
+ parser = csar_parser.CsarParser()
+ if not parser.parse_csar(csar_file):
+ return False
+
+ # yyyymmdd_hhmm
+ if not name_suffix:
+ name_suffix = '_' + datetime.now().strftime('%Y%m%d%H%M')
+
+ # create service
+ instance_name = '_'.join([self.vcpecommon.instance_name_prefix['service'],
+ parser.svc_model['modelName'][0:10], name_suffix])
+ instance_name = instance_name.lower()
+ req = self.generate_custom_service_request(instance_name, parser.svc_model, brg_mac)
+ self.logger.info(json.dumps(req, indent=2, sort_keys=True))
+ self.logger.info('Creating custom service {0}.'.format(instance_name))
+ req_id, svc_instance_id = self.submit_create_req(req, 'service')
+ if not self.check_progress(req_id, 140):
+ return False
+ return True
+
+ def wait_for_aai(self, node_type, uuid):
+ self.logger.info('Waiting for AAI traversal to complete...')
+ #bar = progressbar.ProgressBar()
+ for i in range(30):
+ time.sleep(1)
+ #bar.update(i*100.0/30)
+ if self.vcpecommon.is_node_in_aai(node_type, uuid):
+ #bar.update(100)
+ #bar.finish()
+ return
+
+ self.logger.error("AAI traversal didn't finish in 30 seconds. Something is wrong. Type {0}, UUID {1}".format(
+ node_type, uuid))
+ sys.exit()
+
+ def create_entire_service(self, csar_file, vnf_template_file, preload_dict, name_suffix, region_name, tenant_id, heatbridge=False):
+ """
+ :param csar_file:
+ :param vnf_template_file:
+ :param preload_dict:
+ :param name_suffix:
+ :return: service instance UUID
+ """
+ self.region_name=region_name
+ self.tenant_id=tenant_id
+ self.logger.info('\n----------------------------------------------------------------------------------')
+ self.logger.info('Start to create entire service defined in csar: {0}'.format(csar_file))
+ parser = csar_parser.CsarParser()
+ self.logger.info('Parsing csar ...')
+ if not parser.parse_csar(csar_file):
+ self.logger.error('Cannot parse csar: {0}'.format(csar_file))
+ return None
+
+ # Set Global timestamp for instancenames
+ global_timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
+ # create service
+ instance_name = '_'.join([self.vcpecommon.instance_name_prefix['service'],
+ parser.svc_model['modelName'], global_timestamp, name_suffix])
+ instance_name = instance_name.lower()
+ instance_name = instance_name.replace(' ','')
+ instance_name = instance_name.replace(':','')
+ self.logger.info('Creating service instance: {0}.'.format(instance_name))
+ req = self.generate_service_request(instance_name, parser.svc_model)
+ self.logger.debug(json.dumps(req, indent=2, sort_keys=True))
+ req_id, svc_instance_id = self.submit_create_req(req, 'service')
+ if not self.check_progress(req_id, eta=2, interval=5):
+ return None
+
+ # wait for AAI to complete traversal
+ self.wait_for_aai('service', svc_instance_id)
+
+ # create networks
+ for model in parser.net_models:
+ base_name = model['modelCustomizationName'].lower().replace('mux_vg', 'mux_gw')
+ network_name = '_'.join([self.vcpecommon.instance_name_prefix['network'], base_name, name_suffix])
+ network_name = network_name.lower()
+ self.logger.info('Creating network: ' + network_name)
+ req = self.generate_vnf_or_network_request('network', network_name, model, svc_instance_id,
+ parser.svc_model)
+ self.logger.debug(json.dumps(req, indent=2, sort_keys=True))
+ req_id, net_instance_id = self.submit_create_req(req, 'network', svc_instance_id)
+ if not self.check_progress(req_id, eta=20):
+ return None
+
+ self.logger.info('Changing subnet name to ' + self.vcpecommon.network_name_to_subnet_name(network_name))
+ self.vcpecommon.set_network_name(network_name)
+ subnet_name_changed = False
+ for i in range(20):
+ time.sleep(3)
+ if self.vcpecommon.set_subnet_name(network_name):
+ subnet_name_changed = True
+ break
+
+ if not subnet_name_changed:
+ self.logger.error('Failed to change subnet name for ' + network_name)
+ return None
+
+
+ vnf_model = None
+ vnf_instance_id = None
+ # create VNF
+ if len(parser.vnf_models) == 1:
+ vnf_model = parser.vnf_models[0]
+ vnf_instance_name = '_'.join([self.vcpecommon.instance_name_prefix['vnf'],
+ vnf_model['modelCustomizationName'].split(' ')[0], name_suffix])
+ vnf_instance_name = vnf_instance_name.lower()
+ vnf_instance_name = vnf_instance_name.replace(' ','')
+ vnf_instance_name = vnf_instance_name.replace(':','')
+ self.logger.info('Creating VNF: ' + vnf_instance_name)
+ req = self.generate_vnf_or_network_request('vnf', vnf_instance_name, vnf_model, svc_instance_id,
+ parser.svc_model)
+ self.logger.debug(json.dumps(req, indent=2, sort_keys=True))
+ req_id, vnf_instance_id = self.submit_create_req(req, 'vnf', svc_instance_id)
+ if not self.check_progress(req_id, eta=2, interval=5):
+ self.logger.error('Failed to create VNF {0}.'.format(vnf_instance_name))
+ return False
+
+ # wait for AAI to complete traversal
+ if not vnf_instance_id:
+ self.logger.error('No VNF instance ID returned!')
+ sys.exit()
+ self.wait_for_aai('vnf', vnf_instance_id)
+
+ # SDNC Preload
+
+ preloader = preload.Preload(self.vcpecommon)
+ preloader.preload_vfmodule(vnf_template_file, svc_instance_id, parser.vnf_models[0], parser.vfmodule_models[0],
+ preload_dict, name_suffix)
+
+ # create VF Module
+ if len(parser.vfmodule_models) == 1:
+ if not vnf_instance_id or not vnf_model:
+ self.logger.error('Invalid VNF instance ID or VNF model!')
+ sys.exit()
+
+ model = parser.vfmodule_models[0]
+ vfmodule_instance_name = '_'.join([self.vcpecommon.instance_name_prefix['vfmodule'],
+ model['modelCustomizationName'].split('..')[0], name_suffix])
+ vfmodule_instance_name = vfmodule_instance_name.lower()
+ vfmodule_instance_name = vfmodule_instance_name.replace(' ','')
+ vfmodule_instance_name = vfmodule_instance_name.replace(':','')
+ self.logger.info('Creating VF Module: ' + vfmodule_instance_name)
+ req = self.generate_vfmodule_request(vfmodule_instance_name, model, svc_instance_id, parser.svc_model,
+ vnf_instance_id, vnf_model)
+ self.logger.debug(json.dumps(req, indent=2, sort_keys=True))
+ req_id, vfmodule_instance_id = self.submit_create_req(req, 'vfmodule', svc_instance_id, vnf_instance_id)
+ if not self.check_progress(req_id, eta=70, interval=50):
+ self.logger.error('Failed to create VF Module {0}.'.format(vfmodule_instance_name))
+ return None
+
+ # run heatbridge
+ # removed until we fold in heatbridge
+ #if heatbridge:
+ #self.vcpecommon.heatbridge(vfmodule_instance_name, svc_instance_id)
+ #self.vcpecommon.save_vgmux_vnf_name(vnf_instance_name)
+
+ return svc_instance_id
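+
+ # Summary of create_entire_service() above (comment added for clarity):
+ # 1. parse the csar to get the service/network/VNF/VF-module models
+ # 2. create the service instance in SO and wait for AAI to see it
+ # 3. create each network, then rename its subnet in OpenStack
+ # 4. create the single VNF and wait for AAI to see it
+ # 5. run the SDNC preload for the VF module
+ # 6. create the VF module
+ # Each step polls SO via check_progress(); the method returns None on failure
+ # and the service instance id on success.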
diff --git a/robotframework-onap/vcpeutils/__init__.py b/robotframework-onap/vcpeutils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/robotframework-onap/vcpeutils/__init__.py
diff --git a/robotframework-onap/vcpeutils/csar_parser.py b/robotframework-onap/vcpeutils/csar_parser.py
new file mode 100755
index 0000000..f101364
--- /dev/null
+++ b/robotframework-onap/vcpeutils/csar_parser.py
@@ -0,0 +1,231 @@
+#! /usr/bin/python
+import os
+import zipfile
+import shutil
+import yaml
+import json
+import logging
+
+
+class CsarParser:
+ def __init__(self):
+ self.logger = logging.getLogger(__name__)
+ self.svc_model = {}
+ self.net_models = [] # there could be multiple networks
+ self.vnf_models = [] # this version only supports a single VNF in the service template
+ self.vfmodule_models = [] # this version only supports a single VF module in the service template
+
+ def get_service_yaml_from_csar(self, csar_file):
+ """
+ :param csar_file: csar file path name, e.g. 'csar/vgmux.csar'
+ :return:
+ """
+ tmpdir = './__tmp'
+ if os.path.isdir(tmpdir):
+ shutil.rmtree(tmpdir)
+ os.mkdir(tmpdir)
+
+ with zipfile.ZipFile(csar_file, "r") as zip_ref:
+ zip_ref.extractall(tmpdir)
+
+ yamldir = tmpdir + '/Definitions'
+ if os.path.isdir(yamldir):
+ for filename in os.listdir(yamldir):
+ # look for service template like this: service-Vcpesvcbrgemu111601-template.yml
+ if filename.startswith('service-') and filename.endswith('-template.yml'):
+ return os.path.join(yamldir, filename)
+
+ self.logger.error('No service template found in csar: ' + csar_file)
+ return ''
+
+ def get_service_model_info(self, svc_template):
+ """ extract service model info from yaml and convert to what to be used in SO request
+ Sample from yaml:
+ {
+ "UUID": "aed4fc5e-b871-4e26-8531-ceabd46df85e",
+ "category": "Network L1-3",
+ "description": "Infra service",
+ "ecompGeneratedNaming": true,
+ "invariantUUID": "c806682a-5b3a-44d8-9e88-0708be151296",
+ "name": "vcpesvcinfra111601",
+ "namingPolicy": "",
+ "serviceEcompNaming": true,
+ "serviceRole": "",
+ "serviceType": "",
+ "type": "Service"
+ },
+
+ Convert to
+ {
+ "modelType": "service",
+ "modelInvariantId": "ca4c7a70-06fd-45d8-8b9e-c9745d25bf2b",
+ "modelVersionId": "5d8911b4-e50c-4096-a81e-727a8157193c",
+ "modelName": "vcpesvcbrgemu111601",
+ "modelVersion": "1.0"
+ },
+
+ """
+ if svc_template['metadata']['type'] != 'Service':
+ self.logger.error('csar error: metadata->type is not Service')
+ return
+
+ metadata = svc_template['metadata']
+ self.svc_model = {
+ 'modelType': 'service',
+ 'modelInvariantId': metadata['invariantUUID'],
+ 'modelVersionId': metadata['UUID'],
+ 'modelName': metadata['name']
+ }
+ if 'version' in metadata:
+ self.svc_model['modelVersion'] = metadata['version']
+ else:
+ self.svc_model['modelVersion'] = '1.0'
+
+ def get_vnf_and_network_model_info(self, svc_template):
+ """ extract vnf and network model info from yaml and convert to what to be used in SO request
+ Sample from yaml:
+ "topology_template": {
+ "node_templates": {
+ "CPE_PUBLIC": {
+ "metadata": {
+ "UUID": "33b2c367-a165-4bb3-81c3-0150cd06ceff",
+ "category": "Generic",
+ "customizationUUID": "db1d4ac2-62cd-4e5d-b2dc-300dbd1a5da1",
+ "description": "Generic NeutronNet",
+ "invariantUUID": "3d4c0e47-4794-4e98-a794-baaced668930",
+ "name": "Generic NeutronNet",
+ "resourceVendor": "ATT (Tosca)",
+ "resourceVendorModelNumber": "",
+ "resourceVendorRelease": "1.0.0.wd03",
+ "subcategory": "Network Elements",
+ "type": "VL",
+ "version": "1.0"
+ },
+ "type": "org.openecomp.resource.vl.GenericNeutronNet"
+ },
+ Convert to
+ {
+ "modelType": "network",
+ "modelInvariantId": "3d4c0e47-4794-4e98-a794-baaced668930",
+ "modelVersionId": "33b2c367-a165-4bb3-81c3-0150cd06ceff",
+ "modelName": "Generic NeutronNet",
+ "modelVersion": "1.0",
+ "modelCustomizationId": "db1d4ac2-62cd-4e5d-b2dc-300dbd1a5da1",
+ "modelCustomizationName": "CPE_PUBLIC"
+ },
+ """
+ node_dic = svc_template['topology_template']['node_templates']
+ for node_name, v in node_dic.items():
+ model = {
+ 'modelInvariantId': v['metadata']['invariantUUID'],
+ 'modelVersionId': v['metadata']['UUID'],
+ 'modelName': v['metadata']['name'],
+ 'modelVersion': v['metadata']['version'],
+ 'modelCustomizationId': v['metadata']['customizationUUID'],
+ 'modelCustomizationName': node_name
+ }
+
+ if v['type'].startswith('org.openecomp.resource.vl.GenericNeutronNet'):
+ # a neutron network is found
+ self.logger.info('Parser found a network: ' + node_name)
+ model['modelType'] = 'network'
+ self.net_models.append(model)
+ elif v['type'].startswith('org.openecomp.resource.vf.'):
+ # a VNF is found
+ self.logger.info('Parser found a VNF: ' + node_name)
+ model['modelType'] = 'vnf'
+ self.vnf_models.append(model)
+ else:
+ self.logger.warning('Parser found a node that is neither a network nor a VNF: ' + node_name)
+
+ def get_vfmodule_model_info(self, svc_template):
+ """ extract network model info from yaml and convert to what to be used in SO request
+ Sample from yaml:
+ "topology_template": {
+ "groups": {
+ "vspinfra1116010..Vspinfra111601..base_vcpe_infra..module-0": {
+ "metadata": {
+ "vfModuleModelCustomizationUUID": "11ddac51-30e3-4a3f-92eb-2eb99c2cb288",
+ "vfModuleModelInvariantUUID": "02f70416-581e-4f00-bde1-d65e69af95c5",
+ "vfModuleModelName": "Vspinfra111601..base_vcpe_infra..module-0",
+ "vfModuleModelUUID": "88c78078-f1fd-4f73-bdd9-10420b0f6353",
+ "vfModuleModelVersion": "1"
+ },
+ "properties": {
+ "availability_zone_count": null,
+ "initial_count": 1,
+ "max_vf_module_instances": 1,
+ "min_vf_module_instances": 1,
+ "vf_module_description": null,
+ "vf_module_label": "base_vcpe_infra",
+ "vf_module_type": "Base",
+ "vfc_list": null,
+ "volume_group": false
+ },
+ "type": "org.openecomp.groups.VfModule"
+ }
+ },
+ Convert to
+ {
+ "modelType": "vfModule",
+ "modelInvariantId": "02f70416-581e-4f00-bde1-d65e69af95c5",
+ "modelVersionId": "88c78078-f1fd-4f73-bdd9-10420b0f6353",
+ "modelName": "Vspinfra111601..base_vcpe_infra..module-0",
+ "modelVersion": "1",
+ "modelCustomizationId": "11ddac51-30e3-4a3f-92eb-2eb99c2cb288",
+ "modelCustomizationName": "Vspinfra111601..base_vcpe_infra..module-0"
+ },
+ """
+ node_dic = svc_template['topology_template']['groups']
+ for node_name, v in node_dic.items():
+ if v['type'].startswith('org.openecomp.groups.VfModule'):
+ model = {
+ 'modelType': 'vfModule',
+ 'modelInvariantId': v['metadata']['vfModuleModelInvariantUUID'],
+ 'modelVersionId': v['metadata']['vfModuleModelUUID'],
+ 'modelName': v['metadata']['vfModuleModelName'],
+ 'modelVersion': v['metadata']['vfModuleModelVersion'],
+ 'modelCustomizationId': v['metadata']['vfModuleModelCustomizationUUID'],
+ 'modelCustomizationName': v['metadata']['vfModuleModelName']
+ }
+ self.vfmodule_models.append(model)
+ self.logger.info('Parser found a VF module: ' + model['modelCustomizationName'])
+
+ def parse_service_yaml(self, filename):
+ # clean up
+ self.svc_model = {}
+ self.net_models = [] # there could be multiple networks
+ self.vnf_models = [] # this version only supports a single VNF in the service template
+ self.vfmodule_models = [] # this version only supports a single VF module in the service template
+
+ with open(filename, 'r') as yaml_file:
+ svc_template = yaml.safe_load(yaml_file)
+ self.get_service_model_info(svc_template)
+ self.get_vnf_and_network_model_info(svc_template)
+ self.get_vfmodule_model_info(svc_template)
+
+ return True
+
+ def parse_csar(self, csar_file):
+ yaml_file = self.get_service_yaml_from_csar(csar_file)
+ if yaml_file != '':
+ return self.parse_service_yaml(yaml_file)
+
+ def print_models(self):
+ print('---------Service Model----------')
+ print(json.dumps(self.svc_model, indent=2, sort_keys=True))
+
+ print('---------Network Model(s)----------')
+ for model in self.net_models:
+ print(json.dumps(model, indent=2, sort_keys=True))
+
+ print('---------VNF Model(s)----------')
+ for model in self.vnf_models:
+ print(json.dumps(model, indent=2, sort_keys=True))
+
+ print('---------VF Module Model(s)----------')
+ for model in self.vfmodule_models:
+ print(json.dumps(model, indent=2, sort_keys=True))
+
+ def test(self):
+ self.parse_csar('csar/service-Vcpesvcinfra111601-csar.csar')
+ self.print_models()
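+
+ # Minimal usage sketch (the csar path below is a hypothetical placeholder):
+ #
+ # parser = CsarParser()
+ # if parser.parse_csar('csar/service-Example-csar.csar'):
+ # print(parser.svc_model['modelName'])
+ # for net in parser.net_models:
+ # print(net['modelCustomizationName'])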
diff --git a/robotframework-onap/vcpeutils/preload.py b/robotframework-onap/vcpeutils/preload.py
new file mode 100755
index 0000000..aab28f9
--- /dev/null
+++ b/robotframework-onap/vcpeutils/preload.py
@@ -0,0 +1,231 @@
+#! /usr/bin/python
+
+import requests
+import json
+import sys
+from datetime import datetime
+from vcpecommon import *
+import csar_parser
+#import logging
+from robot.api import logger
+import base64
+
+
+class Preload:
+ def __init__(self, vcpecommon):
+ #self.logger = logging.getLogger(__name__)
+ self.logger = logger
+ self.vcpecommon = vcpecommon
+
+ def replace(self, sz, replace_dict):
+ for old_string, new_string in replace_dict.items():
+ sz = sz.replace(old_string, new_string)
+ if self.vcpecommon.template_variable_symbol in sz:
+ self.logger.error('Error! Cannot find a value to replace ' + sz)
+ return sz
+
+ def generate_json(self, template_file, replace_dict):
+ with open(template_file) as json_input:
+ json_data = json.load(json_input)
+ stk = [json_data]
+ while len(stk) > 0:
+ data = stk.pop()
+ for k, v in data.items():
+ if type(v) is dict:
+ stk.append(v)
+ elif type(v) is list:
+ stk.extend(v)
+ elif type(v) is str or type(v) is unicode:
+ if self.vcpecommon.template_variable_symbol in v:
+ data[k] = self.replace(v, replace_dict)
+ else:
+ self.logger.warning('Unexpected value type in template %s for value: %s', template_file, v)
+ return json_data
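+
+ # Illustrative example of what generate_json() does; the file name and values
+ # here are hypothetical:
+ # template file content: {"network-name": "${network_name}"}
+ # replace_dict: {'${network_name}': 'net_cpe_public_201711201311'}
+ # returned dict: {"network-name": "net_cpe_public_201711201311"}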
+
+ def reset_sniro(self):
+ self.logger.debug('Clearing SNIRO data')
+ r = requests.post(self.vcpecommon.sniro_url + '/reset', headers=self.vcpecommon.sniro_headers)
+ if r.status_code // 100 != 2:
+ self.logger.debug(r.content)
+ self.logger.error('Clearing SNIRO data failed.')
+ sys.exit()
+
+ def preload_sniro(self, template_sniro_data, template_sniro_request, tunnelxconn_ar_name, vgw_name, vbrg_ar_name,
+ vgmux_svc_instance_uuid, vbrg_svc_instance_uuid):
+ self.reset_sniro()
+ self.logger.info('Preloading SNIRO for homing service')
+ replace_dict = {'${tunnelxconn_ar_name}': tunnelxconn_ar_name,
+ '${vgw_name}': vgw_name,
+ '${brg_ar_name}': vbrg_ar_name,
+ '${vgmux_svc_instance_uuid}': vgmux_svc_instance_uuid,
+ '${vbrg_svc_instance_uuid}': vbrg_svc_instance_uuid
+ }
+ sniro_data = self.generate_json(template_sniro_data, replace_dict)
+ self.logger.debug('SNIRO data:')
+ self.logger.debug(json.dumps(sniro_data, indent=4, sort_keys=True))
+
+ base64_sniro_data = base64.b64encode(json.dumps(sniro_data))
+ self.logger.debug('SNIRO data: 64')
+ self.logger.debug(base64_sniro_data)
+ replace_dict = {'${base64_sniro_data}': base64_sniro_data, '${sniro_ip}': self.vcpecommon.hosts['robot']}
+ sniro_request = self.generate_json(template_sniro_request, replace_dict)
+ self.logger.debug('SNIRO request:')
+ self.logger.debug(json.dumps(sniro_request, indent=4, sort_keys=True))
+
+ r = requests.post(self.vcpecommon.sniro_url, headers=self.vcpecommon.sniro_headers, json=sniro_request)
+ if r.status_code // 100 != 2:
+ response = r.json()
+ self.logger.debug(json.dumps(response, indent=4, sort_keys=True))
+ self.logger.error('SNIRO preloading failed.')
+ sys.exit()
+
+ return True
+
+ def preload_network(self, template_file, network_role, subnet_start_ip, subnet_gateway, common_dict, name_suffix):
+ """
+ :param template_file:
+ :param network_role: cpe_signal, cpe_public, brg_bng, bng_mux, mux_gw
+ :param subnet_start_ip:
+ :param subnet_gateway:
+ :param name_suffix: e.g. '201711201311'
+ :return:
+ """
+ network_name = '_'.join([self.vcpecommon.instance_name_prefix['network'], network_role.lower(), name_suffix])
+ subnet_name = self.vcpecommon.network_name_to_subnet_name(network_name)
+ common_dict['${' + network_role+'_net}'] = network_name
+ common_dict['${' + network_role+'_subnet}'] = subnet_name
+ replace_dict = {'${network_role}': network_role,
+ '${service_type}': 'vCPE',
+ '${network_type}': 'Generic NeutronNet',
+ '${network_name}': network_name,
+ '${subnet_start_ip}': subnet_start_ip,
+ '${subnet_gateway}': subnet_gateway
+ }
+ self.logger.info('Preloading network ' + network_role)
+ return self.preload(template_file, replace_dict, self.vcpecommon.sdnc_preload_network_url)
+
+ def preload(self, template_file, replace_dict, url):
+ self.logger.debug(json.dumps(replace_dict, indent=4, sort_keys=True))
+ json_data = self.generate_json(template_file, replace_dict)
+ self.logger.debug(json.dumps(json_data, indent=4, sort_keys=True))
+ r = requests.post(url, headers=self.vcpecommon.sdnc_headers, auth=self.vcpecommon.sdnc_userpass, json=json_data)
+ response = r.json()
+ if int(response.get('output', {}).get('response-code', 0)) != 200:
+ self.logger.debug(json.dumps(response, indent=4, sort_keys=True))
+ self.logger.error('Preloading failed.')
+ return False
+ return True
+
+ def preload_vgw(self, template_file, brg_mac, common_dict, name_suffix):
+ replace_dict = {'${brg_mac}': brg_mac,
+ '${suffix}': name_suffix
+ }
+ replace_dict.update(common_dict)
+ self.logger.info('Preloading vGW')
+ return self.preload(template_file, replace_dict, self.vcpecommon.sdnc_preload_vnf_url)
+
+ def preload_vgw_gra(self, template_file, brg_mac, common_dict, name_suffix, vgw_vfmod_name_index):
+ replace_dict = {'${brg_mac}': brg_mac,
+ '${suffix}': name_suffix,
+ '${vgw_vfmod_name_index}': vgw_vfmod_name_index
+ }
+ replace_dict.update(common_dict)
+ self.logger.info('Preloading vGW-GRA')
+ return self.preload(template_file, replace_dict, self.vcpecommon.sdnc_preload_gra_url)
+
+ def preload_vfmodule(self, template_file, service_instance_id, vnf_model, vfmodule_model, common_dict, name_suffix):
+ """
+ :param template_file:
+ :param service_instance_id:
+ :param vnf_model: parsing results from csar_parser
+ :param vfmodule_model: parsing results from csar_parser
+ :param common_dict:
+ :param name_suffix:
+ :return:
+ """
+
+ # examples:
+ # vfmodule_model['modelCustomizationName']: "Vspinfra111601..base_vcpe_infra..module-0",
+ # vnf_model['modelCustomizationName']: "vspinfra111601 0",
+
+ vfmodule_name = '_'.join([self.vcpecommon.instance_name_prefix['vfmodule'],
+ vfmodule_model['modelCustomizationName'].split('..')[0].lower(), name_suffix])
+
+ # vnf_type and generic_vnf_type are identical
+ replace_dict = {'${vnf_type}': vfmodule_model['modelCustomizationName'],
+ '${generic_vnf_type}': vfmodule_model['modelCustomizationName'],
+ '${service_type}': service_instance_id,
+ '${generic_vnf_name}': vnf_model['modelCustomizationName'],
+ '${vnf_name}': vfmodule_name,
+ '${mr_ip_addr}': self.vcpecommon.mr_ip_addr,
+ '${mr_ip_port}': self.vcpecommon.mr_ip_port,
+ '${sdnc_oam_ip}': self.vcpecommon.sdnc_oam_ip,
+ '${suffix}': name_suffix}
+ replace_dict.update(common_dict)
+ self.logger.info('Preloading VF Module ' + vfmodule_name)
+ return self.preload(template_file, replace_dict, self.vcpecommon.sdnc_preload_vnf_url)
+
+ def preload_all_networks(self, template_file, name_suffix):
+ common_dict = {'${' + k + '}': v for k, v in self.vcpecommon.common_preload_config.items()}
+ for network, v in self.vcpecommon.preload_network_config.items():
+ subnet_start_ip, subnet_gateway_ip = v
+ if not self.preload_network(template_file, network, subnet_start_ip, subnet_gateway_ip,
+ common_dict, name_suffix):
+ return None
+ return common_dict
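+
+ # The returned dictionary maps template variables to concrete names; the values
+ # below are illustrative only:
+ # {'${oam_onap_net}': 'oam_network_2No2',
+ # '${cpe_public_net}': 'net_cpe_public_201711201311',
+ # '${cpe_public_subnet}': 'net_cpe_public_subnet_201711201311', ...}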
+
+ def test(self):
+ # this is for testing purpose
+ name_suffix = datetime.now().strftime('%Y%m%d%H%M')
+ vcpecommon = VcpeCommon()
+ preloader = Preload(vcpecommon)
+
+ network_dict = {'${' + k + '}': v for k, v in self.vcpecommon.common_preload_config.items()}
+ template_file = 'preload_templates/template.network.json'
+ for k, v in self.vcpecommon.preload_network_config.items():
+ if not preloader.preload_network(template_file, k, v[0], v[1], network_dict, name_suffix):
+ break
+
+ print('---------------------------------------------------------------')
+ print('Network related replacement dictionary:')
+ print(json.dumps(network_dict, indent=4, sort_keys=True))
+ print('---------------------------------------------------------------')
+
+ keys = ['infra', 'bng', 'gmux', 'brg']
+ for key in keys:
+ csar_file = self.vcpecommon.find_file(key, 'csar', 'csar')
+ template_file = self.vcpecommon.find_file(key, 'json', 'preload_templates')
+ if csar_file and template_file:
+ parser = csar_parser.CsarParser()
+ parser.parse_csar(csar_file)
+ service_instance_id = 'test112233'
+ preloader.preload_vfmodule(template_file, service_instance_id, parser.vnf_models[0],
+ parser.vfmodule_models[0], network_dict, name_suffix)
+
+ def test_sniro(self):
+ template_sniro_data = self.vcpecommon.find_file('sniro_data', 'json', 'preload_templates')
+ template_sniro_request = self.vcpecommon.find_file('sniro_request', 'json', 'preload_templates')
+
+ vcperescust_csar = self.vcpecommon.find_file('rescust', 'csar', 'csar')
+ parser = csar_parser.CsarParser()
+ parser.parse_csar(vcperescust_csar)
+ tunnelxconn_ar_name = None
+ brg_ar_name = None
+ vgw_name = None
+ for model in parser.vnf_models:
+ if 'tunnel' in model['modelCustomizationName']:
+ tunnelxconn_ar_name = model['modelCustomizationName']
+ elif 'brg' in model['modelCustomizationName']:
+ brg_ar_name = model['modelCustomizationName']
+ elif 'vgw' in model['modelCustomizationName']:
+ vgw_name = model['modelCustomizationName']
+
+ if not (tunnelxconn_ar_name and brg_ar_name and vgw_name):
+ self.logger.error('Cannot find all names from %s.', vcperescust_csar)
+ sys.exit()
+
+ vgmux_svc_instance_uuid = '88888888888888'
+ vbrg_svc_instance_uuid = '999999999999999'
+
+ self.preload_sniro(template_sniro_data, template_sniro_request, tunnelxconn_ar_name, vgw_name, brg_ar_name,
+ vgmux_svc_instance_uuid, vbrg_svc_instance_uuid)
diff --git a/robotframework-onap/vcpeutils/vcpecommon.py b/robotframework-onap/vcpeutils/vcpecommon.py
new file mode 100755
index 0000000..95b5bbe
--- /dev/null
+++ b/robotframework-onap/vcpeutils/vcpecommon.py
@@ -0,0 +1,325 @@
+import json
+import logging
+import os
+import pickle
+import re
+import sys
+
+import ipaddress
+import requests
+import commands
+import time
+from novaclient import client as openstackclient
+from netaddr import IPAddress, IPNetwork
+
+class VcpeCommon:
+ #############################################################################################
+ # Start: configurations that you must change for a new ONAP installation
+ external_net_addr = '10.12.0.0'
+ external_net_prefix_len = 16
+ #############################################################################################
+ # set the openstack cloud access credentials here
+ oom_mode = True
+
+ cloud = {
+ '--os-auth-url': 'http://10.12.25.2:5000',
+ '--os-username': 'kxi',
+ '--os-user-domain-id': 'default',
+ '--os-project-domain-id': 'default',
+ '--os-tenant-id': '09d8566ea45e43aa974cf447ed591d77' if oom_mode else '1e097c6713e74fd7ac8e4295e605ee1e',
+ '--os-region-name': 'RegionOne',
+ '--os-password': 'n3JhGMGuDzD8',
+ '--os-project-domain-name': 'Integration-SB-03' if oom_mode else 'Integration-SB-07',
+ '--os-identity-api-version': '3'
+ }
+
+ common_preload_config = {
+ 'oam_onap_net': 'oam_network_2No2' if oom_mode else 'oam_onap_lAky',
+ 'oam_onap_subnet': 'oam_network_2No2' if oom_mode else 'oam_onap_lAky',
+ 'public_net': 'external',
+ 'public_net_id': '971040b2-7059-49dc-b220-4fab50cb2ad4'
+ }
+ sdnc_controller_pod = 'dev-sdnc-sdnc-0'
+
+ #############################################################################################
+
+ template_variable_symbol = '${'
+ cpe_vm_prefix = 'zdcpe'
+ #############################################################################################
+ # preloading network config
+ # key=network role
+ # value = [subnet_start_ip, subnet_gateway_ip]
+ preload_network_config = {
+ 'cpe_public': ['10.2.0.2', '10.2.0.1'],
+ 'cpe_signal': ['10.4.0.2', '10.4.0.1'],
+ 'brg_bng': ['10.3.0.2', '10.3.0.1'],
+ 'bng_mux': ['10.1.0.10', '10.1.0.1'],
+ 'mux_gw': ['10.5.0.10', '10.5.0.1']
+ }
+
+ dcae_ves_collector_name = 'dcae-bootstrap'
+ global_subscriber_id = 'Demonstration'
+ project_name = 'Project-Demonstration'
+ owning_entity_id = '520cc603-a3c4-4ec2-9ef4-ca70facd79c0'
+ owning_entity_name = 'OE-Demonstration1'
+
+ def __init__(self, extra_host_names=None):
+ rootlogger = logging.getLogger()
+ handler = logging.StreamHandler()
+ formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s.%(funcName)s(): %(message)s')
+ handler.setFormatter(formatter)
+ rootlogger.addHandler(handler)
+ rootlogger.setLevel(logging.INFO)
+
+ self.logger = logging.getLogger(__name__)
+ self.logger.propagate = False
+ self.logger.addHandler(handler)
+ self.logger.setLevel(logging.DEBUG)
+ self.logger.info('Initializing configuration')
+
+ # CHANGEME: vgw_VfModuleModelInvariantUuid is in the rescust service csar; look in service-VcpesvcRescust1118-template.yml under groups for the vgw module metadata. TODO: read this value automatically
+ self.vgw_VfModuleModelInvariantUuid = '26d6a718-17b2-4ba8-8691-c44343b2ecd2'
+ # CHANGEME: OOM: this is the address that the BRG and BNG will NAT to for SDNC access - the 10.0.0.x address of the k8s host for the sdnc-0 container
+ #self.sdnc_oam_ip = self.get_pod_node_oam_ip('sdnc-sdnc-0')
+ self.sdnc_oam_ip = 'sdnc.onap'
+ # CHANGEME: OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
+ #self.oom_so_sdnc_aai_ip = self.get_pod_node_public_ip('sdnc-sdnc-0')
+ # CHANGEME: OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
+ #self.oom_dcae_ves_collector = self.oom_so_sdnc_aai_ip
+ # CHANGEME: OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
+ #self.mr_ip_addr = self.oom_so_sdnc_aai_ip
+ self.mr_ip_addr = 'mr.onap'
+ #self.mr_ip_port = '30227'
+ self.mr_ip_port = '3904'
+ #self.so_nbi_port = '30277' if self.oom_mode else '8080'
+ self.so_nbi_port = '8080'
+ #self.sdnc_preloading_port = '30202' if self.oom_mode else '8282'
+ self.sdnc_preloading_port = '8282'
+ #self.aai_query_port = '30233' if self.oom_mode else '8443'
+ self.aai_query_port = '8443'
+ #self.sniro_port = '30288' if self.oom_mode else '8080'
+ self.sniro_port = '8080'
+
+ self.host_names = ['so', 'sdnc', 'robot', 'aai', self.dcae_ves_collector_name]
+ if extra_host_names:
+ self.host_names.extend(extra_host_names)
+ # get IP addresses
+ #self.hosts = self.get_vm_ip(self.host_names, self.external_net_addr, self.external_net_prefix_len)
+ self.hosts = { 'so': 'so.onap', 'sdnc': 'sdnc.onap', 'robot': 'robot.onap', 'aai': 'aai.onap' }
+ # this is the keyword used to name vgw stack, must not be used in other stacks
+ self.vgw_name_keyword = 'base_vcpe_vgw'
+ # this is the file that will keep the index of last assigned SO name
+ self.vgw_vfmod_name_index_file = '__var/vgw_vfmod_name_index'
+ self.svc_instance_uuid_file = '__var/svc_instance_uuid'
+ self.preload_dict_file = '__var/preload_dict'
+ self.vgmux_vnf_name_file = '__var/vgmux_vnf_name'
+ self.product_family_id = 'f9457e8c-4afd-45da-9389-46acd9bf5116'
+ self.custom_product_family_id = 'a9a77d5a-123e-4ca2-9eb9-0b015d2ee0fb'
+ self.instance_name_prefix = {
+ 'service': 'svc',
+ 'network': 'net',
+ 'vnf': 'vnf',
+ 'vfmodule': 'vf'
+ }
+ self.aai_userpass = 'AAI', 'AAI'
+ self.pub_key = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh'
+ self.os_tenant_id = self.cloud['--os-tenant-id']
+ self.os_region_name = self.cloud['--os-region-name']
+ self.common_preload_config['pub_key'] = self.pub_key
+ self.sniro_url = 'http://' + self.hosts['robot'] + ':' + self.sniro_port + '/__admin/mappings'
+ self.sniro_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.homing_solution = 'sniro' # value is either 'sniro' or 'oof'
+# self.homing_solution = 'oof'
+ self.customer_location_used_by_oof = {
+ "customerLatitude": "32.897480",
+ "customerLongitude": "-97.040443",
+ "customerName": "some_company"
+ }
+
+ #############################################################################################
+ # SDNC urls
+ self.sdnc_userpass = 'admin', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
+ self.sdnc_db_name = 'sdnctl'
+ self.sdnc_db_user = 'sdnctl'
+ self.sdnc_db_pass = 'gamma'
+ self.sdnc_db_port = '32774'
+ self.sdnc_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.sdnc_preload_network_url = 'http://' + self.hosts['sdnc'] + \
+ ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-network-topology-operation'
+ self.sdnc_preload_vnf_url = 'http://' + self.hosts['sdnc'] + \
+ ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-vnf-topology-operation'
+ self.sdnc_preload_gra_url = 'http://' + self.hosts['sdnc'] + \
+ ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-vf-module-topology-operation'
+ self.sdnc_ar_cleanup_url = 'http://' + self.hosts['sdnc'] + ':' + self.sdnc_preloading_port + \
+ '/restconf/config/GENERIC-RESOURCE-API:'
+
+ #############################################################################################
+ # SO urls, note: do NOT add a '/' at the end of the url
+ self.so_req_api_url = {'v4': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances',
+ 'v5': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances'}
+ self.so_check_progress_api_url = 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/orchestrationRequests/v6'
+ self.so_userpass = 'InfraPortalClient', 'password1$'
+ self.so_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.so_db_name = 'catalogdb'
+ self.so_db_user = 'root'
+ self.so_db_pass = 'password'
+ self.so_db_port = '30252' if self.oom_mode else '32769'
+
+ self.vpp_inf_url = 'http://{0}:8183/restconf/config/ietf-interfaces:interfaces'
+ self.vpp_api_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.vpp_api_userpass = ('admin', 'admin')
+ self.vpp_ves_url = 'http://{0}:8183/restconf/config/vesagent:vesagent'
+
+ def find_file(self, file_name_keyword, file_ext, search_dir):
+ """
+ :param file_name_keyword: keyword used to look for the file, case-insensitive matching, e.g., infra
+ :param file_ext: e.g., csar, json
+ :param search_dir: path to search
+ :return: path name of the file
+ """
+ file_name_keyword = file_name_keyword.lower()
+ file_ext = file_ext.lower()
+ if not file_ext.startswith('.'):
+ file_ext = '.' + file_ext
+
+ filenamepath = None
+ for file_name in os.listdir(search_dir):
+ file_name_lower = file_name.lower()
+ if file_name_keyword in file_name_lower and file_name_lower.endswith(file_ext):
+ if filenamepath:
+ self.logger.error('Multiple files found for *{0}*.{1} in '
+ 'directory {2}'.format(file_name_keyword, file_ext, search_dir))
+ sys.exit()
+ filenamepath = os.path.abspath(os.path.join(search_dir, file_name))
+
+ if filenamepath:
+ return filenamepath
+ else:
+ self.logger.error("Cannot find *{0}*{1} in directory {2}".format(file_name_keyword, file_ext, search_dir))
+ sys.exit()
+
+ @staticmethod
+ def network_name_to_subnet_name(network_name):
+ """
+ :param network_name: example: vcpe_net_cpe_signal_201711281221
+ :return: vcpe_net_cpe_signal_subnet_201711281221
+ """
+ fields = network_name.split('_')
+ fields.insert(-1, 'subnet')
+ return '_'.join(fields)
+
+ def set_network_name(self, network_name):
+ param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
+ openstackcmd = 'openstack ' + param
+ cmd = ' '.join([openstackcmd, 'network set --name', network_name, 'ONAP-NW1'])
+ os.popen(cmd)
+
+ def set_subnet_name(self, network_name):
+ """
+ Example: network_name = vcpe_net_cpe_signal_201711281221
+ set subnet name to vcpe_net_cpe_signal_subnet_201711281221
+ :return:
+ """
+ param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
+ openstackcmd = 'openstack ' + param
+
+ # expected results: | subnets | subnet_id |
+ subnet_info = os.popen(openstackcmd + ' network show ' + network_name + ' |grep subnets').read().split('|')
+ if len(subnet_info) > 2 and subnet_info[1].strip() == 'subnets':
+ subnet_id = subnet_info[2].strip()
+ subnet_name = self.network_name_to_subnet_name(network_name)
+ cmd = ' '.join([openstackcmd, 'subnet set --name', subnet_name, subnet_id])
+ os.popen(cmd)
+ self.logger.info("Subnet name set to: " + subnet_name)
+ return True
+ else:
+ self.logger.error("Can't get subnet info from network name: " + network_name)
+ return False
+
+ def is_node_in_aai(self, node_type, node_uuid):
+ key = None
+ search_node_type = None
+ if node_type == 'service':
+ search_node_type = 'service-instance'
+ key = 'service-instance-id'
+ elif node_type == 'vnf':
+ search_node_type = 'generic-vnf'
+ key = 'vnf-id'
+ else:
+ logging.error('Invalid node_type: ' + node_type)
+ sys.exit()
+
+ url = 'https://{0}:{1}/aai/v11/search/nodes-query?search-node-type={2}&filter={3}:EQUALS:{4}'.format(
+ self.hosts['aai'], self.aai_query_port, search_node_type, key, node_uuid)
+
+ headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-FromAppID': 'vCPE-Robot', 'X-TransactionId': 'get_aai_subscr'}
+ requests.packages.urllib3.disable_warnings()
+ r = requests.get(url, headers=headers, auth=self.aai_userpass, verify=False)
+ response = r.json()
+ self.logger.debug('aai query: ' + url)
+ self.logger.debug('aai response:\n' + json.dumps(response, indent=4, sort_keys=True))
+ return 'result-data' in response
+
+ @staticmethod
+ def extract_ip_from_str(net_addr, net_addr_len, sz):
+ """
+ :param net_addr: e.g. 10.5.12.0
+ :param net_addr_len: e.g. 24
+ :param sz: a string
+ :return: the first IP address matching the network, e.g. 10.5.12.3
+ """
+ network = ipaddress.ip_network(unicode('{0}/{1}'.format(net_addr, net_addr_len)), strict=False)
+ ip_list = re.findall(r'[0-9]+(?:\.[0-9]+){3}', sz)
+ for ip in ip_list:
+ this_net = ipaddress.ip_network(unicode('{0}/{1}'.format(ip, net_addr_len)), strict=False)
+ if this_net == network:
+ return str(ip)
+ return None
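+
+ # Illustrative example (values are hypothetical):
+ # extract_ip_from_str('10.5.12.0', 24, 'inet 10.5.12.3/24 brd 10.5.12.255')
+ # returns '10.5.12.3'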
+
+ @staticmethod
+ def save_object(obj, filepathname):
+ with open(filepathname, 'wb') as fout:
+ pickle.dump(obj, fout)
+
+ @staticmethod
+ def load_object(filepathname):
+ with open(filepathname, 'rb') as fin:
+ return pickle.load(fin)
+
+ @staticmethod
+ def increase_ip_address_or_vni_in_template(vnf_template_file, vnf_parameter_name_list):
+ with open(vnf_template_file) as json_input:
+ json_data = json.load(json_input)
+ param_list = json_data['VNF-API:input']['VNF-API:vnf-topology-information']['VNF-API:vnf-parameters']
+ for param in param_list:
+ if param['vnf-parameter-name'] in vnf_parameter_name_list:
+ ipaddr_or_vni = param['vnf-parameter-value'].split('.')
+ number = int(ipaddr_or_vni[-1])
+ if 254 == number:
+ number = 10
+ else:
+ number = number + 1
+ ipaddr_or_vni[-1] = str(number)
+ param['vnf-parameter-value'] = '.'.join(ipaddr_or_vni)
+
+ assert json_data is not None
+ with open(vnf_template_file, 'w') as json_output:
+ json.dump(json_data, json_output, indent=4, sort_keys=True)
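+
+ # Illustrative effect (values are hypothetical): a parameter listed in
+ # vnf_parameter_name_list with value '10.3.0.2' is rewritten to '10.3.0.3'
+ # in the template file; the last octet wraps from 254 back to 10. A plain
+ # VNI such as '100' becomes '101'.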
+
+ def save_preload_data(self, preload_data):
+ self.save_object(preload_data, self.preload_dict_file)
+
+ def load_preload_data(self):
+ return self.load_object(self.preload_dict_file)
+
+ def save_vgmux_vnf_name(self, vgmux_vnf_name):
+ self.save_object(vgmux_vnf_name, self.vgmux_vnf_name_file)
+
+ def load_vgmux_vnf_name(self):
+ return self.load_object(self.vgmux_vnf_name_file)
+