author    | Hansen, Tony (th1395) <th1395@att.com> | 2018-03-28 20:51:00 +0000
committer | Hansen, Tony (th1395) <th1395@att.com> | 2018-03-28 21:20:10 +0000
commit    | adcb1dfaf4d88062c11630fee59eacb15965c373 (patch)
tree      | 09d47c8ce2db298fe9b651d65ab8022d88a34c67
parent    | 0dc92cb05329fb28e8cabfc276e9a9993ce47b7a (diff)
add capability to take over an existing DB
add ability to take a port as part of the hostname
add IPv6 support as [IPv6address] or [IPv6address]:port
Change-Id: Ia7a7309f7964b64fa65b0fa98891e529d7765056
Signed-off-by: Hansen, Tony (th1395) <th1395@att.com>
Issue-ID: CCSDK-228
Signed-off-by: Hansen, Tony (th1395) <th1395@att.com>
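The port and IPv6 handling comes down to splitting a writerfqdn/readerfqdn value into host and port parts. Below is a minimal standalone sketch of that parsing, modelled on the hostportion()/portportion() helpers added to pgaas_plugin.py in this change; the function name split_hostport and the bracketed-IPv6-first ordering are illustrative, and 5432 is the PostgreSQL default used when no port is given.

import re

def split_hostport(hostport, default_port='5432'):
    # Accepts fqdn, fqdn:port, IPv4, IPv4:port, [IPv6], or [IPv6]:port,
    # the forms named in the commit message.
    ipv6 = re.match(r"^\[([^\]]+)\](?::(\d+))?$", hostport)
    if ipv6:
        return ipv6.group(1), ipv6.group(2) or default_port
    ipv4 = re.match(r"^([^:]+)(?::(\d+))?$", hostport)
    if ipv4:
        return ipv4.group(1), ipv4.group(2) or default_port
    raise ValueError("invalid hostport: {}".format(hostport))

# split_hostport("db.example.com")        -> ("db.example.com", "5432")
# split_hostport("db.example.com:15432")  -> ("db.example.com", "15432")
# split_hostport("[2001:db8::10]:15432")  -> ("2001:db8::10", "15432")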
-rw-r--r-- | pgaas/LICENSE.txt               |   4
-rw-r--r-- | pgaas/pgaas/logginginterface.py |  55
-rw-r--r-- | pgaas/pgaas/pgaas_plugin.py     | 184
-rw-r--r-- | pgaas/pgaas_types.yaml          |  14
-rw-r--r-- | pgaas/pom.xml                   |  10
-rw-r--r-- | pgaas/setup.py                  |  20
-rw-r--r-- | pgaas/tests/test_plugin.py      |  23
-rw-r--r-- | pgaas/tox.ini                   |  10
8 files changed, 228 insertions, 92 deletions
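The "take over an existing DB" capability amounts to one bootstrap step: connect to the cluster as postgres using the operator-supplied initialpassword, then reset that account to the password the plugin derives and manages from then on (this is what the new initialpassword handling in getclusterinfo() does in the diff below). A rough sketch of that step in isolation, with an illustrative function name and arguments:

import psycopg2

def take_over_cluster(host, port, initialpassword, managed_password):
    # One-time bootstrap: authenticate with the cluster's current password,
    # then switch the postgres account to the plugin-managed password.
    conn = psycopg2.connect(host=host, port=port, database='postgres',
                            user='postgres', password=initialpassword)
    conn.autocommit = True
    try:
        cur = conn.cursor()
        cur.execute("ALTER USER postgres WITH PASSWORD %s", (managed_password,))
        cur.close()
    finally:
        conn.close()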
diff --git a/pgaas/LICENSE.txt b/pgaas/LICENSE.txt
index f90f8f1..e77a45a 100644
--- a/pgaas/LICENSE.txt
+++ b/pgaas/LICENSE.txt
@@ -1,7 +1,7 @@
-============LICENSE_START=======================================================
 org.onap.ccsdk
+============LICENSE_START=======================================================
 ================================================================================
-Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
 ================================================================================
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/pgaas/pgaas/logginginterface.py b/pgaas/pgaas/logginginterface.py
new file mode 100644
index 0000000..5974a25
--- /dev/null
+++ b/pgaas/pgaas/logginginterface.py
@@ -0,0 +1,55 @@
+# org.onap.ccsdk
+# ============LICENSE_START====================================================
+# =============================================================================
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
+from cloudify import ctx
+
+def debug(msg):
+  """
+  Print a debugging message.
+  This is a handy endpoint to add other extended debugging calls.
+  """
+  ctx.logger.debug(msg)
+  with open("/var/tmp/pgaas.log", "a") as fd:
+    fd.write("DEBUG: " + msg + "\n")
+
+def warn(msg):
+  """
+  Print a warning message.
+  This is a handy endpoint to add other extended warning calls.
+  """
+  ctx.logger.warn(msg)
+  with open("/var/tmp/pgaas.log", "a") as fd:
+    fd.write("WARN: " + msg + "\n")
+
+def error(msg):
+  """
+  Print an error message.
+  This is a handy endpoint to add other extended error calls.
+  """
+  ctx.logger.error(msg)
+  with open("/var/tmp/pgaas.log", "a") as fd:
+    fd.write("ERROR: " + msg + "\n")
+
+def info(msg):
+  """
+  Print a info message.
+  This is a handy endpoint to add other extended info calls.
+  """
+  ctx.logger.info(msg)
+  with open("/var/tmp/pgaas.log", "a") as fd:
+    fd.write("INFO: " + msg + "\n")
diff --git a/pgaas/pgaas/pgaas_plugin.py b/pgaas/pgaas/pgaas_plugin.py
index bfeeba0..c0cab67 100644
--- a/pgaas/pgaas/pgaas_plugin.py
+++ b/pgaas/pgaas/pgaas_plugin.py
@@ -1,8 +1,28 @@
+# org.onap.ccsdk
+# ============LICENSE_START====================================================
+# =============================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
 from cloudify import ctx
 from cloudify.decorators import operation
 from cloudify.exceptions import NonRecoverableError
 from cloudify.exceptions import RecoverableError
 
+from logginginterface import *
+
 import os
 import re
 import json
@@ -43,8 +63,22 @@ sys.path = opath
       # or: writerfqdn: { get_property: [ dns_pgrs_rw, fqdn ] }
       use_existing: true
 
+  To initialize an existing server to be managed by pgaas_plugin::
+      - https://$NEXUS/repository/raw/type_files/sshkeyshare/sshkey_types.yaml
+      - https://$NEXUS/repository/raw/type_files/pgaas_types.yaml
+    pgaas_cluster:
+      type: dcae.nodes.pgaas.cluster
+      properties:
+        writerfqdn: { concat: [ { get_input: location_prefix }, '-', { get_input: pgaas_cluster_name }, '-write.', { get_input: location_domain } ] }
+        readerfqdn: { concat: [ { get_input: location_prefix }, '-', { get_input: pgaas_cluster_name }, '.', { get_input: location_domain } ] }
+        initialpassword: { get_input: currentpassword }
+      relationships:
+        - type: dcae.relationships.pgaas_cluster_uses_sshkeypair
+          target: sharedsshkey_pgrs
+
 
       - { get_attribute: [ pgaas_cluster, public ] }
       - { get_attribute: [ pgaas_cluster, base64private ] }
+      # - { get_attribute: [ pgaas_cluster, postgrespswd ] }
 
   To set up a database:
@@ -103,33 +137,12 @@ def setOptManagerResources(o):
 def safestr(s):
   return urllib.quote(str(s), '')
 
-def debug(msg):
-  """
-  Print a debugging message.
-  This is a handy endpoint to add other extended debugging calls.
-  """
-  ctx.logger.info(msg)
-
-def warn(msg):
-  """
-  Print a warning message.
-  This is a handy endpoint to add other extended warning calls.
-  """
-  ctx.logger.warn(msg)
-
-def info(msg):
-  """
-  Print a info message.
-  This is a handy endpoint to add other extended info calls.
-  """
-  ctx.logger.info(msg)
-
 def raiseRecoverableError(msg):
   """
   Print a warning message and raise a RecoverableError exception.
   This is a handy endpoint to add other extended debugging calls.
   """
-  ctx.logger.warn(msg)
+  warn(msg)
   raise RecoverableError(msg)
 
 def raiseNonRecoverableError(msg):
@@ -137,9 +150,12 @@ def raiseNonRecoverableError(msg):
   Print an error message and raise a NonRecoverableError exception.
   This is a handy endpoint to add other extended debugging calls.
""" - ctx.logger.error(msg) + error(msg) raise NonRecoverableError(msg) +def dbexecute(crx, cmd, args=None): + debug("executing {}".format(cmd)) + crx.execute(cmd, args) def waithp(host, port): """ @@ -148,7 +164,7 @@ def waithp(host, port): debug("waithp({0},{1})".format(safestr(host),safestr(port))) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: - sock.connect((host, port)) + sock.connect((host, int(port))) except: a, b, c = sys.exc_info() traceback.print_exception(a, b, c) @@ -160,30 +176,57 @@ def doconn(desc): """ open an SQL connection to the PG server """ - debug("doconn()") + debug("doconn({},{},{})".format(desc['host'], desc['user'],desc['database'])) + # debug("doconn({},{},{},{})".format(desc['host'], desc['user'],desc['database'],desc['password'])) ret = psycopg2.connect(**desc) ret.autocommit = True return ret -def rootdesc(data, dbname): +def hostportion(hostport): + """ + return the host portion of a fqdn:port or IPv4:port or [IPv6]:port + """ + ipv4re = re.match(r"^([^:]+)(:(\d+))?", hostport) + ipv6re = re.match(r"^[[]([^]]+)[]](:(\d+))?", hostport) + if ipv4re: + return ipv4re.group(1) + if ipv6re: + return ipv6re.group(1) + raiseNonRecoverableError("invalid hostport: {}".format(hostport)) + +def portportion(hostport): + """ + Return the port portion of a fqdn:port or IPv4:port or [IPv6]:port. + If port is not present, return 5432. + """ + ipv6re = re.match(r"^[[]([^]]+)[]](:(\d+))?", hostport) + ipv4re = re.match(r"^([^:]+)(:(\d+))?", hostport) + if ipv4re: + return ipv4re.group(3) if ipv4re.group(3) else '5432' + if ipv6re: + return ipv6re.group(3) if ipv6re.group(3) else '5432' + raiseNonRecoverableError("invalid hostport: {}".format(hostport)) + +def rootdesc(data, dbname, initialpassword=None): """ return the postgres connection information """ debug("rootdesc(..data..,{0})".format(safestr(dbname))) return { 'database': dbname, - 'host': data['rw'], + 'host': hostportion(data['rw']), + 'port': portportion(data['rw']), 'user': 'postgres', - 'password': getpass(data, 'postgres') + 'password': initialpassword if initialpassword else getpass(data, 'postgres') } -def rootconn(data, dbname='postgres'): +def rootconn(data, dbname='postgres', initialpassword=None): """ connect to a given server as postgres, connecting to the specified database """ debug("rootconn(..data..,{0})".format(safestr(dbname))) - ret = doconn(rootdesc(data, dbname)) + ret = doconn(rootdesc(data, dbname, initialpassword)) return ret def onedesc(data, dbname, role, access): @@ -193,7 +236,8 @@ def onedesc(data, dbname, role, access): user = '{0}_{1}'.format(dbname, role) return { 'database': dbname, - 'host': data[access], + 'host': hostportion(data[access]), + 'port': portportion(data[access]), 'user': user, 'password': getpass(data, user) } @@ -212,7 +256,7 @@ def getpass(data, ident): """ generate the password for a given user on a specific server """ - m = hashlib.md5() + m = hashlib.sha256() m.update(ident) m.update(base64.b64decode(data['data'])) return m.hexdigest() @@ -234,7 +278,10 @@ def chkfqdn(fqdn): """ verify that a FQDN is valid """ - return re.match('^[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+$', fqdn) is not None + hp = hostportion(fqdn) + pp = portportion(fqdn) + # TODO need to augment this for IPv6 addresses + return re.match('^[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+$', hp) is not None def chkdbname(dbname): """ @@ -244,13 +291,13 @@ def chkdbname(dbname): if not ret: warn("Invalid dbname: {0}".format(safestr(dbname))) return ret -def getclusterinfo(wfqdn, reuse, rfqdn, related): 
+def getclusterinfo(wfqdn, reuse, rfqdn, initialpassword, related):
   """
   Retrieve all of the information specific to a cluster.
   if reuse, retrieve it else create and store it
   """
-  debug("getclusterinfo({0}, {1}, {2},..related..)".format(safestr(wfqdn), safestr(reuse), safestr(rfqdn)))
+  debug("getclusterinfo({}, {}, {}, {}, ..related..)".format(safestr(wfqdn), safestr(reuse), safestr(rfqdn), safestr(initialpassword)))
   if not chkfqdn(wfqdn):
     raiseNonRecoverableError('Invalid FQDN specified for admin/read-write access, fqdn={0}'.format(safestr(wfqdn)))
   if reuse:
@@ -274,7 +321,7 @@
     raiseNonRecoverableError('Invalid FQDN specified for read-only access, fqdn={0}'.format(safestr(rfqdn)))
   if len(related) != 1:
     raiseNonRecoverableError('Cluster SSH keypair must be specified using a dcae.relationships.pgaas_cluster_uses_sshkeypair relationship to a dcae.nodes.sshkeypair node')
-  data = { 'ro': rfqdn, 'pubkey': related[0].instance.runtime_properties['public'], 'data': related[0].instance.runtime_properties['base64private'] }
+  data = { 'ro': rfqdn, 'pubkey': related[0].instance.runtime_properties['public'], 'data': related[0].instance.runtime_properties['base64private'], 'hash': 'sha256' }
   os.umask(077)
   try:
     os.makedirs('{0}/pgaas'.format(OPT_MANAGER_RESOURCES))
@@ -288,8 +335,12 @@
     warn("Stack: {0}".format(traceback.format_exc()))
     raiseNonRecoverableError('Cannot write cluster information to {0}/pgaas: fqdn={1}, err={2}'.format(OPT_MANAGER_RESOURCES, safestr(wfqdn),e))
   data['rw'] = wfqdn
+  if initialpassword:
+    with rootconn(data, initialpassword=initialpassword) as conn:
+      crr = conn.cursor()
+      dbexecute(crr, "ALTER USER postgres WITH PASSWORD %s", (getpass(data, 'postgres'),))
+      crr.close()
   return(data)
-
 
 @operation
 def add_pgaas_cluster(**kwargs):
@@ -299,9 +350,14 @@
   """
   try:
     warn("add_pgaas_cluster() invoked")
-    data = getclusterinfo(ctx.node.properties['writerfqdn'], ctx.node.properties['use_existing'], ctx.node.properties['readerfqdn'], find_related_nodes('dcae.relationships.pgaas_cluster_uses_sshkeypair'))
+    data = getclusterinfo(ctx.node.properties['writerfqdn'],
+                          ctx.node.properties['use_existing'],
+                          ctx.node.properties['readerfqdn'],
+                          ctx.node.properties['initialpassword'],
+                          find_related_nodes('dcae.relationships.pgaas_cluster_uses_sshkeypair'))
     ctx.instance.runtime_properties['public'] = data['pubkey']
     ctx.instance.runtime_properties['base64private'] = data['data']
+    # ctx.instance.runtime_properties['postgrespswd'] = getpass(data, 'postgres')
     warn('All done')
   except Exception as e:
     ctx.logger.warn("Error: {0}".format(e))
@@ -338,8 +394,8 @@ def dbgetinfo(refctx):
   wfqdn = related[0].node.properties['writerfqdn']
   if not chkfqdn(wfqdn):
     raiseNonRecoverableError('Invalid FQDN specified for admin/read-write access, fqdn={0}'.format(safestr(wfqdn)))
-  ret = getclusterinfo(wfqdn, True, '', [])
-  waithp(wfqdn, 5432)
+  ret = getclusterinfo(wfqdn, True, '', '', [])
+  waithp(hostportion(wfqdn), portportion(wfqdn))
   return ret
 
 @operation
@@ -363,13 +419,13 @@
     ctx.instance.runtime_properties['viewer'] = descs['viewer']
     with rootconn(info) as conn:
      crx = conn.cursor()
-      crx.execute('SELECT datname FROM pg_database WHERE datistemplate = false')
+      dbexecute(crx,'SELECT datname FROM pg_database WHERE datistemplate = false')
       existingdbs = [ x[0] for x in crx ]
       if ctx.node.properties['use_existing']:
         if dbname not in existingdbs:
           raiseNonRecoverableError('use_existing specified but database does not exist, dbname={0}'.format(safestr(dbname)))
         return
-      crx.execute('SELECT rolname FROM pg_roles')
+      dbexecute(crx,'SELECT rolname FROM pg_roles')
       existingroles = [ x[0] for x in crx ]
       admu = descs['admin']['user']
       usru = descs['user']['user']
@@ -378,36 +434,36 @@
       cvwr = '{0}_common_viewer_role'.format(dbname)
       schm = '{0}_db_common'.format(dbname)
       if admu not in existingroles:
-        crx.execute('CREATE USER {0} WITH PASSWORD %s'.format(admu), (descs['admin']['password'],))
+        dbexecute(crx,'CREATE USER {0} WITH PASSWORD %s'.format(admu), (descs['admin']['password'],))
       if usru not in existingroles:
-        crx.execute('CREATE USER {0} WITH PASSWORD %s'.format(usru), (descs['user']['password'],))
+        dbexecute(crx,'CREATE USER {0} WITH PASSWORD %s'.format(usru), (descs['user']['password'],))
       if vwru not in existingroles:
-        crx.execute('CREATE USER {0} WITH PASSWORD %s'.format(vwru), (descs['viewer']['password'],))
+        dbexecute(crx,'CREATE USER {0} WITH PASSWORD %s'.format(vwru), (descs['viewer']['password'],))
       if cusr not in existingroles:
-        crx.execute('CREATE ROLE {0}'.format(cusr))
+        dbexecute(crx,'CREATE ROLE {0}'.format(cusr))
       if cvwr not in existingroles:
-        crx.execute('CREATE ROLE {0}'.format(cvwr))
+        dbexecute(crx,'CREATE ROLE {0}'.format(cvwr))
       if dbname not in existingdbs:
-        crx.execute('CREATE DATABASE {0} WITH OWNER {1}'.format(dbname, admu))
+        dbexecute(crx,'CREATE DATABASE {0} WITH OWNER {1}'.format(dbname, admu))
       crx.close()
     with rootconn(info, dbname) as dbconn:
       crz = dbconn.cursor()
       for r in [ cusr, cvwr, usru, vwru ]:
-        crz.execute('REVOKE ALL ON DATABASE {0} FROM {1}'.format(dbname, r))
-      crz.execute('GRANT {0} TO {1}'.format(cvwr, cusr))
-      crz.execute('GRANT {0} TO {1}'.format(cusr, admu))
-      crz.execute('GRANT CONNECT ON DATABASE {0} TO {1}'.format(dbname, cvwr))
-      crz.execute('CREATE SCHEMA IF NOT EXISTS {0} AUTHORIZATION {1}'.format(schm, admu))
+        dbexecute(crz,'REVOKE ALL ON DATABASE {0} FROM {1}'.format(dbname, r))
+      dbexecute(crz,'GRANT {0} TO {1}'.format(cvwr, cusr))
+      dbexecute(crz,'GRANT {0} TO {1}'.format(cusr, admu))
+      dbexecute(crz,'GRANT CONNECT ON DATABASE {0} TO {1}'.format(dbname, cvwr))
+      dbexecute(crz,'CREATE SCHEMA IF NOT EXISTS {0} AUTHORIZATION {1}'.format(schm, admu))
       for r in [ admu, cusr, cvwr, usru, vwru ]:
-        crz.execute('ALTER ROLE {0} IN DATABASE {1} SET search_path = public, {2}'.format(r, dbname, schm))
-      crz.execute('GRANT USAGE ON SCHEMA {0} to {1}'.format(schm, cvwr))
-      crz.execute('GRANT CREATE ON SCHEMA {0} to {1}'.format(schm, admu))
-      crz.execute('ALTER DEFAULT PRIVILEGES FOR ROLE {0} GRANT SELECT ON TABLES TO {1}'.format(admu, cvwr))
-      crz.execute('ALTER DEFAULT PRIVILEGES FOR ROLE {0} GRANT INSERT, UPDATE, DELETE, TRUNCATE ON TABLES TO {1}'.format(admu, cusr))
-      crz.execute('ALTER DEFAULT PRIVILEGES FOR ROLE {0} GRANT USAGE, SELECT, UPDATE ON SEQUENCES TO {1}'.format(admu, cusr))
-      crz.execute('GRANT TEMP ON DATABASE {0} TO {1}'.format(dbname, cusr))
-      crz.execute('GRANT {0} to {1}'.format(cusr, usru))
-      crz.execute('GRANT {0} to {1}'.format(cvwr, vwru))
+        dbexecute(crz,'ALTER ROLE {0} IN DATABASE {1} SET search_path = public, {2}'.format(r, dbname, schm))
+      dbexecute(crz,'GRANT USAGE ON SCHEMA {0} to {1}'.format(schm, cvwr))
+      dbexecute(crz,'GRANT CREATE ON SCHEMA {0} to {1}'.format(schm, admu))
+      dbexecute(crz,'ALTER DEFAULT PRIVILEGES FOR ROLE {0} GRANT SELECT ON TABLES TO {1}'.format(admu, cvwr))
+      dbexecute(crz,'ALTER DEFAULT PRIVILEGES FOR ROLE {0} GRANT INSERT, UPDATE, DELETE, TRUNCATE ON TABLES TO {1}'.format(admu, cusr))
+      dbexecute(crz,'ALTER DEFAULT PRIVILEGES FOR ROLE {0} GRANT USAGE, SELECT, UPDATE ON SEQUENCES TO {1}'.format(admu, cusr))
+      dbexecute(crz,'GRANT TEMP ON DATABASE {0} TO {1}'.format(dbname, cusr))
+      dbexecute(crz,'GRANT {0} to {1}'.format(cusr, usru))
+      dbexecute(crz,'GRANT {0} to {1}'.format(cvwr, vwru))
       crz.close()
     warn('All done')
   except Exception as e:
@@ -440,9 +496,9 @@
       vwru = ctx.instance.runtime_properties['viewer']['user']
       cusr = '{0}_common_user_role'.format(dbname)
       cvwr = '{0}_common_viewer_role'.format(dbname)
-      crx.execute('DROP DATABASE IF EXISTS {0}'.format(dbname))
+      dbexecute(crx,'DROP DATABASE IF EXISTS {0}'.format(dbname))
       for r in [ usru, vwru, admu, cusr, cvwr ]:
-        crx.execute('DROP ROLE IF EXISTS {0}'.format(r))
+        dbexecute(crx,'DROP ROLE IF EXISTS {0}'.format(r))
     warn('All gone')
   except Exception as e:
     ctx.logger.warn("Error: {0}".format(e))
diff --git a/pgaas/pgaas_types.yaml b/pgaas/pgaas_types.yaml
index 51f0b85..d98d326 100644
--- a/pgaas/pgaas_types.yaml
+++ b/pgaas/pgaas_types.yaml
@@ -1,12 +1,11 @@
+# -*- indent-tabs-mode: nil -*- # vi: set expandtab:
 tosca_definitions_version: cloudify_dsl_1_3
-imports:
-  - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
 
 plugins:
   pgaas:
     executor: central_deployment_agent
     package_name: pgaas
-    package_version: 1.0.0
+    package_version: 1.1.0
 
 node_types:
   dcae.nodes.pgaas.cluster:
@@ -23,10 +22,19 @@ node_types:
        description: 'FQDN used for read-only access to the cluster (default - same as writerfqdn)'
        type: string
        default: ''
+      port:
+        description: 'Port used for access to the cluster'
+        type: string
+        default: '5432'
+      initialpassword:
+        description: 'Password of existing PG instance to take control of'
+        type: string
+        default: ''
     interfaces:
       cloudify.interfaces.lifecycle:
         create: pgaas.pgaas.pgaas_plugin.add_pgaas_cluster
         delete: pgaas.pgaas.pgaas_plugin.rm_pgaas_cluster
+
   dcae.nodes.pgaas.database:
     derived_from: cloudify.nodes.Root
     properties:
diff --git a/pgaas/pom.xml b/pgaas/pom.xml
index d9504e7..1c8eb5c 100644
--- a/pgaas/pom.xml
+++ b/pgaas/pom.xml
@@ -41,7 +41,7 @@ limitations under the License.
     <!-- path of types file itself relative to this file -->
     <typefile.source>pgaas_types.yaml</typefile.source>
     <!-- path, in repo, to store type file -->
-    <typefile.dest>type_files/pgaas/pgaas_types.yaml</typefile.dest>
+    <typefile.dest>type_files/pgaas/1.1.0/pgaas_types.yaml</typefile.dest>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <sonar.sources>.</sonar.sources>
     <sonar.junit.reportsPath>xunit-results.xml</sonar.junit.reportsPath>
@@ -227,11 +227,11 @@ limitations under the License.
              <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
              <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
              <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
-             <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
-             <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
              <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
              <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
              <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+             <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+             <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
            </environmentVariables>
          </configuration>
        </execution>
@@ -252,11 +252,11 @@ limitations under the License.
              <MVN_PROJECT_ARTIFACTID>${project.artifactId}</MVN_PROJECT_ARTIFACTID>
              <MVN_PROJECT_VERSION>${project.version}</MVN_PROJECT_VERSION>
              <MVN_NEXUSPROXY>${onap.nexus.url}</MVN_NEXUSPROXY>
-             <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
-             <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
              <MVN_RAWREPO_BASEURL_UPLOAD>${onap.nexus.rawrepo.baseurl.upload}</MVN_RAWREPO_BASEURL_UPLOAD>
              <MVN_RAWREPO_BASEURL_DOWNLOAD>${onap.nexus.rawrepo.baseurl.download}</MVN_RAWREPO_BASEURL_DOWNLOAD>
              <MVN_RAWREPO_SERVERID>${onap.nexus.rawrepo.serverid}</MVN_RAWREPO_SERVERID>
+             <PLUGIN_NAME>${plugin.name}</PLUGIN_NAME>
+             <PLUGIN_SUBDIR>${plugin.subdir}</PLUGIN_SUBDIR>
            </environmentVariables>
          </configuration>
        </execution>
diff --git a/pgaas/setup.py b/pgaas/setup.py
index 5ab5719..5454a37 100644
--- a/pgaas/setup.py
+++ b/pgaas/setup.py
@@ -1,8 +1,26 @@
+# org.onap.ccsdk
+# ============LICENSE_START====================================================
+# =============================================================================
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
+# =============================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END======================================================
+
 from setuptools import setup, find_packages
 
 setup(
     name="pgaas",
-    version="1.0.0",
+    version="1.1.0",
     packages=find_packages(),
     author="AT&T",
     description=("Cloudify plugin for pgaas/pgaas."),
diff --git a/pgaas/tests/test_plugin.py b/pgaas/tests/test_plugin.py
index 5561f16..197654e 100644
--- a/pgaas/tests/test_plugin.py
+++ b/pgaas/tests/test_plugin.py
@@ -1,7 +1,7 @@
 # ============LICENSE_START====================================================
 # org.onap.ccsdk
 # =============================================================================
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
 # =============================================================================
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -60,6 +60,15 @@ class MockRuntimeProperties(object):
   def runtime_properties(self):
     return self._runtime_properties
 
+class MockSocket(object):
+  def __init__(self):
+    pass
+  def connect(self,host=None,port=None):
+    pass
+  def close(self):
+    pass
+
+
 def _connect(h,p):
   return { }
 
@@ -70,7 +79,9 @@ def set_mock_context(msg, monkeypatch):
     'writerfqdn': 'test.bar.example.com',
     'use_existing': False,
     'readerfqdn': 'test-ro.bar.example.com',
-    'name': 'testdb'
+    'name': 'testdb',
+    'port': '5432',
+    'initialpassword': 'test'
   }
 
   sshkeyprops = {
@@ -108,14 +119,6 @@ def test_add_pgaas_cluster(monkeypatch):
     current_ctx.clear()
     os.system("echo After test; ls -l /tmp/pgaas") #### DELETE
 
-class MockSocket(object):
-  def __init__(self):
-    pass
-  def connect(self,host=None,port=None):
-    pass
-  def close(self):
-    pass
-
 @pytest.mark.dependency(depends=['test_add_pgaas_cluster'])
 def test_add_database(monkeypatch):
   try:
diff --git a/pgaas/tox.ini b/pgaas/tox.ini
index 76daae0..e8b2b52 100644
--- a/pgaas/tox.ini
+++ b/pgaas/tox.ini
@@ -20,13 +20,9 @@ envlist = py27
 
 [testenv]
 deps=
-    requests
-    cloudify==3.4
     pytest
+    cloudify==4.2
+    requests
     coverage
     pytest-cov
-setenv=
-    PYTHONPATH={toxinidir}
-commands=
-    pytest --junitxml xunit-results.xml --cov pgaas --cov-report xml
-    coverage xml
+commands=pytest --junitxml xunit-results.xml --cov --cov-report=xml
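A side effect of the md5-to-sha256 change (and the reason the stored cluster data now records 'hash': 'sha256'): every per-role password is derived by hashing the role name together with the cluster's stored secret, so switching the hash changes every derived password. A small sketch of the derivation as getpass() computes it after this change; cluster_secret_b64 stands in for the base64-encoded value kept in data['data']:

import base64
import hashlib

def derive_password(cluster_secret_b64, ident):
    # ident is a role name such as 'postgres' or '<dbname>_admin';
    # written Python 2 style (plain str), matching the py27 plugin code.
    m = hashlib.sha256()
    m.update(ident)
    m.update(base64.b64decode(cluster_secret_b64))
    return m.hexdigest()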