-rw-r--r--  INFO.yaml | 6
-rw-r--r--  aria/aria-rest-server/src/main/python/aria-rest/LICENSE | 18
-rw-r--r--  aria/aria-rest-server/src/main/python/aria-rest/__init__.py | 19
-rw-r--r--  aria/aria-rest-server/src/main/python/aria-rest/aria_rest/__init__.py | 19
-rw-r--r--  aria/aria-rest-server/src/main/python/aria-rest/aria_rest/rest.py | 657
-rw-r--r--  aria/aria-rest-server/src/main/python/aria-rest/aria_rest/templates/index.html | 23
-rw-r--r--  aria/aria-rest-server/src/main/python/aria-rest/aria_rest/util.py | 48
-rw-r--r--  aria/aria-rest-server/src/main/python/aria-rest/rest.py | 57
-rw-r--r--  aria/aria-rest-server/src/main/python/aria-rest/setup.py | 42
-rw-r--r--  aria/aria-rest-server/src/main/python/aria-rest/templates/index.html | 3
-rw-r--r--  aria/multivim-plugin/.gitignore | 63
-rw-r--r--  aria/multivim-plugin/.travis.yml | 18
-rw-r--r--  aria/multivim-plugin/build.py | 0
-rw-r--r--  aria/multivim-plugin/pom.xml | 97
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/CHANGELOG.txt | 30
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/LICENSE | 202
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/Makefile | 39
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/README.md | 11
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/README.rst | 4
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/__init__.py | 14
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/tests/__init__.py | 14
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/tests/test_volume.py | 342
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/volume.py | 125
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/circle.yml | 27
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/dev-requirements.txt | 3
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/__init__.py | 14
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/image.py | 177
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/tests/resources/test-image-start.yaml | 30
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/tests/test.py | 148
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/__init__.py | 14
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/project.py | 150
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/tests/__init__.py | 0
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/tests/test.py | 115
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/__init__.py | 1
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/floatingip.py | 104
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/network.py | 109
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/port.py | 222
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/router.py | 215
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/security_group.py | 130
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/subnet.py | 101
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/__init__.py | 1
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test.py | 220
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test_port.py | 156
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test_security_group.py | 115
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/__init__.py | 16
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/floatingip.py | 60
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/keypair.py | 202
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/security_group.py | 81
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/server.py | 944
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/__init__.py | 0
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-keypair-validation-blueprint.yaml | 23
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-server-create-secgroup.yaml | 31
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-start-operation-retry-blueprint.yaml | 31
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_relationships.py | 228
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_server.py | 551
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_server_image_and_flavor.py | 228
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_userdata.py | 63
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_validation.py | 194
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/userdata.py | 50
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/__init__.py | 1005
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/floatingip.py | 84
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/security_group.py | 148
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/__init__.py | 0
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/openstack_client_tests.py | 849
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/provider-context.json | 78
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/test.py | 40
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/plugin.yaml | 1178
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/readthedocs.yml | 1
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/setup.py | 45
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/__init__.py | 2
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/openstack_handler.py | 657
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/openstack_nova_net_handler.py | 98
-rw-r--r--  aria/multivim-plugin/src/main/python/multivim-plugin/tox.ini | 44
-rw-r--r--  bpmn/MSOCoreBPMN/src/main/java/org/openecomp/mso/bpmn/core/PropertyConfiguration.java | 4
-rw-r--r--  bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/PropertyConfigurationTest.java | 2
-rw-r--r--  bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/TestBaseTask.java | 9
-rw-r--r--  bpmn/MSOInfrastructureBPMN/src/main/groovy/org/openecomp/mso/bpmn/infrastructure/scripts/CreateActivateSDNCResource.groovy | 425
-rw-r--r--  bpmn/MSOInfrastructureBPMN/src/main/groovy/org/openecomp/mso/bpmn/infrastructure/scripts/DoCreateE2EServiceInstance.groovy | 15
-rw-r--r--  bpmn/MSOInfrastructureBPMN/src/main/java/org/openecomp/mso/bpmn/infrastructure/workflow/service/ServicePluginFactory.java | 443
-rw-r--r--  bpmn/MSOInfrastructureBPMN/src/main/java/org/openecomp/mso/bpmn/infrastructure/workflow/serviceTask/client/builder/AbstractBuilder.java | 2
-rw-r--r--  bpmn/MSOInfrastructureBPMN/src/main/resources/process/CreateActivateSDNCResource.bpmn | 393
-rw-r--r--  bpmn/MSOInfrastructureBPMN/src/main/resources/subprocess/DoCreateE2EServiceInstance.bpmn | 116
-rw-r--r--  bpmn/pom.xml | 1
-rw-r--r--  docs/Building_SO.rst | 2
84 files changed, 1357 insertions, 10889 deletions
diff --git a/INFO.yaml b/INFO.yaml
index 990e63b893..85aebba3ce 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -33,11 +33,6 @@ committers:
company: 'Ericsson'
id: 'byungwoojun'
timezone: ''
- - name: 'Christophe Closset'
- email: 'cc697w@intl.att.com'
- company: 'ATT'
- id: 'ChrisC'
- timezone: 'Belgium/Namur'
- name: 'Claude Noshpitz'
email: 'claude.noshpitz@att.com'
company: 'ATT'
@@ -115,6 +110,7 @@ tsc:
name: 'Tal Liron'
name: 'Heliu Zhong'
name: 'Yuanwei Yang'
+ name: 'Christophe Closset'
link: 'https://lists.onap.org/pipermail/onap-tsc/2018-May/004802.html'
- type: 'addition'
name: 'Marcus Williams'
diff --git a/aria/aria-rest-server/src/main/python/aria-rest/LICENSE b/aria/aria-rest-server/src/main/python/aria-rest/LICENSE
deleted file mode 100644
index 270877b831..0000000000
--- a/aria/aria-rest-server/src/main/python/aria-rest/LICENSE
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# ============LICENSE_START===================================================
-# Copyright (c) 2017 Cloudify.co. All rights reserved.
-# ===================================================================
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-# ============LICENSE_END====================================================
-#
-
diff --git a/aria/aria-rest-server/src/main/python/aria-rest/__init__.py b/aria/aria-rest-server/src/main/python/aria-rest/__init__.py
deleted file mode 100644
index 5e93dc2ae5..0000000000
--- a/aria/aria-rest-server/src/main/python/aria-rest/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-
-#
-# ============LICENSE_START===================================================
-# Copyright (c) 2017 Cloudify.co. All rights reserved.
-# ===================================================================
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-# ============LICENSE_END====================================================
-#
-
diff --git a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/__init__.py b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/__init__.py
deleted file mode 100644
index 5e93dc2ae5..0000000000
--- a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-
-#
-# ============LICENSE_START===================================================
-# Copyright (c) 2017 Cloudify.co. All rights reserved.
-# ===================================================================
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-# ============LICENSE_END====================================================
-#
-
diff --git a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/rest.py b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/rest.py
deleted file mode 100644
index 7b9223d1c9..0000000000
--- a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/rest.py
+++ /dev/null
@@ -1,657 +0,0 @@
-#
-# ============LICENSE_START===================================================
-# Copyright (c) 2017 Cloudify.co. All rights reserved.
-# ===================================================================
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-# ============LICENSE_END====================================================
-#
-
-
-import os
-from flask import Flask, request, jsonify
-from flask_autodoc.autodoc import Autodoc
-from aria import install_aria_extensions
-from aria.parser import consumption
-from aria.utils import formatting, collections
-from aria.cli.core import aria
-from aria.cli import utils
-from aria.exceptions import ParsingError, DependentServicesError
-from aria.core import Core
-from aria.cli import service_template_utils
-from aria.storage import exceptions as storage_exceptions
-from aria.utils import threading
-from aria.orchestrator.workflow_runner import WorkflowRunner
-from aria.orchestrator.workflows.executor.dry import DryExecutor
-import util
-import tempfile
-import shutil
-
-version_id = "0.1"
-route_base = "/api/" + version_id + "/"
-app = Flask("onap-aria-rest")
-auto = Autodoc(app)
-
-# TODO Garbage collect this dict somehow
-execution_state = util.SafeDict()
-
-
-def main():
- install_aria_extensions()
- app.run(host='0.0.0.0', port=5000, threaded=True)
-
-
-@app.route("/")
-@app.route("/api")
-@app.route("/docs")
-def index():
- return auto.html()
-
-
-###
-# TEMPLATES
-###
-
-# add template
-@app.route(route_base + "templates/<template_name>", methods=['PUT'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_resource_storage
-@aria.pass_plugin_manager
-@aria.pass_logger
-def install_template(template_name, model_storage, resource_storage,
- plugin_manager, logger):
-
- """
- installs a template in Aria storage
-
- 3 modes possible:
-
- 1. PUT JSON body which points to a CSAR URL. Content-type must be
- application/json. PUT data is a JSON object/map with the following
- keys.:
- * service_template_path (required): URL to CSAR
- * service_template_filename (optional): service template file.
-
- 2. PUT with service template file body. Content-type must be
- text/plain.
-
- 3. PUT with binary CSAR body. Content-type must be application/zip.
- Optional query string arg "template_filename" can indicate the
- service template filename in the CSAR. Defaults to
- "service-template.yaml".
- """
-
- service_template_path = None
- service_template_filename = "service-template.yaml"
-
- rtype = "unknown"
- if request.is_json:
- rtype = "json"
- elif request.headers['Content-Type'] == "application/zip":
- rtype = "zip"
- suffix = ".csar"
- elif request.headers['Content-Type'] == "text/plain":
- rtype = "yaml"
- suffix = ".yaml"
-
- if rtype == "zip" or rtype == "yaml":
- with tempfile.NamedTemporaryFile(prefix = "ariatmp_",
- suffix = suffix,
- delete = False) as f:
- f.write(request.data)
- service_template_path = f.name
- if request.headers['Content-Type'] == "application/zip":
- if "template_filename" in request.args:
- service_template_filename = request.args["template_filename"]
-
- elif rtype == "json":
-
- body = request.json or {}
-
- # Check body
- if "service_template_path" in body:
- service_template_path = body["service_template_path"]
- else:
- return "request body missing service_template_path", 501
-
- if "service_template_filename" in body:
- service_template_filename = body["service_template_filename"]
- else:
- service_template_filename = "service-template.yaml"
-
- else:
- return "Unrecognized content type",400
-
- service_template_file_path = service_template_utils.get(
- service_template_path, service_template_filename)
-
- core = Core(model_storage, resource_storage, plugin_manager)
-
- try:
- core.create_service_template(service_template_file_path,
- os.path.dirname(service_template_path),
- template_name)
- except storage_exceptions.StorageError as e:
- logger.error("storage exception")
- utils.check_overriding_storage_exceptions(
- e, 'service template', template_name)
- return e.message, 500
- except Exception as e:
- logger.error("catchall exception")
- return e.message, 500
- finally:
- # cleanup
- if rtype == "zip" or rtype == "yaml":
- os.remove(service_template_path)
- if rtype == "zip":
- shutil.rmtree(os.path.dirname(service_template_file_path))
-
- return "service template installed", 200
-
-# validate template
-@app.route(route_base + "templates", methods=['POST'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_resource_storage
-@aria.pass_plugin_manager
-@aria.pass_logger
-def validate_template(model_storage, resource_storage, plugin_manager, logger):
- """
- Validates a TOSCA template
- """
- body = request.json or {}
-
- # Check body
- if "service_template_path" in body:
- service_template_path = body["service_template_path"]
- else:
- return "request body missing service_template_path", 501
- if "service_template_filename" in body:
- service_template_filename = body["service_template_filename"]
- else:
- service_template_filename = "service-template.yaml"
-
- service_template_path = service_template_utils.get(
- service_template_path, service_template_filename)
-
- core = Core(model_storage, resource_storage, plugin_manager)
- try:
- context = core.validate_service_template(service_template_path)
- except ParsingError as e:
- return e.message, 400
-
- logger.info('Service template {} validated'.format(service_template_path))
- return "", 200
-
-
-# delete template
-@app.route(route_base + "templates/<template_id>", methods=['DELETE'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_resource_storage
-@aria.pass_plugin_manager
-@aria.pass_logger
-def delete_template(
- template_id,
- model_storage,
- resource_storage,
- plugin_manager,
- logger):
- """
- Deletes a template from Aria storage
- """
-
- logger.info('Deleting service template {}'.format(template_id))
- core = Core(model_storage, resource_storage, plugin_manager)
- try:
- core.delete_service_template(template_id)
- except DependentServicesError as e:
- logger.error("dependent services error")
- return e.message, 400
- except Exception as e:
- logger.error("failed")
- return "Failed to delete template", 500
-
- logger.info('Service template {} deleted'.format(template_id))
- return "", 200
-
-
-# get template json
-@app.route(route_base + "templates/<template_id>/json", methods=['GET'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def get_template_json(template_id, model_storage, logger):
- """ get JSON representation of template """
- template = model_storage.service_template.get(template_id)
- consumption.ConsumptionContext()
- body = formatting.json_dumps(collections.prune(template.as_raw))
- return body
-
-
-# list templates
-@app.route(route_base + "templates", methods=['GET'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def list_templates(model_storage, logger):
- """
- Lists templates installed in Aria storage
- """
- list = model_storage.service_template.list()
- templates = []
- for item in list:
- templates.append({"name": item.name,
- "id": item.id,
- "description": item.description
- })
- return jsonify(templates)
-
-
-# list nodes
-@app.route(route_base + "templates/<template_id>/nodes", methods=['GET'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def list_nodes_by_template(template_id, model_storage, logger):
- """
- Lists node templates in specified Aria template
- """
- service_template = model_storage.service_template.get(template_id)
- filters = dict(service_template=service_template)
- nodes = model_storage.node_template.list(filters=filters)
- nodelist = []
-
- for node in nodes:
- nodelist.append({
- "id": node.id,
- "name": node.name,
- "description": node.description,
- "service_template_id": service_template.id,
- "type_name": node.type_name
- })
- return jsonify(nodelist), 200
-
-
-# show node details
-@app.route(route_base + "nodes/<node_id>", methods=['GET'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def get_node(node_id, model_storage, logger):
- """
- Get node details
- """
- node_template = model_storage.node_template.get(node_id)
- service_template = model_storage.service_template.get_by_name(
- node_template.service_template_name)
- retmap = {}
- retmap['id'] = node_id
- retmap['name'] = node_template.name
- retmap['description'] = node_template.description
- retmap['service_template_id'] = service_template.id
- retmap['type_name'] = node_template.type_name
- return jsonify(retmap), 200
-
-###
-# SERVICES
-###
-
-
-# list services
-@app.route(route_base + "services", methods=['GET'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def list_services(model_storage, logger):
- """
- Lists all services
- """
- services_list = model_storage.service.list()
- outlist = []
- for service in services_list:
- outlist.append({"id": service.id,
- "description": service.description,
- "name": service.name,
- "service_template": service.service_template.name,
- "created": service.created_at,
- "updated": service.updated_at})
- return jsonify(outlist), 200
-
-
-# show service
-@app.route(route_base + "services/<service_id>", methods=['GET'])
-def show_service(service_id):
- """
- Returns details for the specified service
- """
- return "not implemented", 501
-
-
-# get service outputs
-@app.route(route_base + "services/<service_id>/outputs", methods=['GET'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def get_service_outputs(service_id, model_storage, logger):
- """
- Gets outputs for specified service
- """
- service = model_storage.service.get(service_id)
- outlist = []
- for output_name, output in service.outputs.iteritems():
- outlist.append({"name": output_name, "description": output.description,
- "value": output.value})
- return jsonify(outlist)
-
-
-# get service inputs
-@app.route(route_base + "services/<service_id>/inputs", methods=['GET'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def get_service_inputs(service_id, model_storage, logger):
- """
- Gets inputs for specified service
- """
- service = model_storage.service.get(service_id)
- outlist = []
- for input_name, input in service.inputs.iteritems():
- outlist.append({"name": input_name, "description": input.description,
- "value": input.value})
- return jsonify(outlist)
-
-
-# create service
-@app.route(route_base + "templates/<template_id>/services/<service_name>",
- methods=['POST'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_resource_storage
-@aria.pass_plugin_manager
-@aria.pass_logger
-def create_service(template_id, service_name, model_storage, resource_storage,
- plugin_manager, logger):
- """
- Creates a service from the specified service template
- """
- body = request.json or {}
- inputs = {}
- if 'inputs' in body:
- inputs = body['inputs']
- core = Core(model_storage, resource_storage, plugin_manager)
- service = core.create_service(template_id, inputs, service_name)
-
- logger.info("service {} created".format(service.name))
- return "service {} created".format(service.name), 200
-
-
-# delete service
-@app.route(route_base + "services/<service_id>", methods=['DELETE'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_resource_storage
-@aria.pass_plugin_manager
-@aria.pass_logger
-def delete_service(
- service_id,
- model_storage,
- resource_storage,
- plugin_manager,
- logger):
- """
- Deletes the specified service
- """
- service = model_storage.service.get(service_id)
- core = Core(model_storage, resource_storage, plugin_manager)
- core.delete_service(service_id, force=True)
- return "service {} deleted".format(service.id), 200
-
-
-###
-# WORKFLOWS
-###
-
-
-# list workflows
-@app.route(route_base + "services/<service_id>/workflows", methods=['GET'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def list_workflows(service_id, model_storage, logger):
- """
- Lists all defined user workflows for the specified service
- """
- service = model_storage.service.get(service_id)
- workflows = service.workflows.itervalues()
- outlist = []
- for workflow in workflows:
- outlist.append(workflow.name)
- return jsonify(outlist), 200
-
-
-# show workflow
-@app.route(
- route_base +
- "services/<service_id>/workflow/<workflow_name>",
- methods=['GET'])
-def show_workflow(service_name, workflow_name):
- """
- Returns details of specified workflow
- """
- return "not implemented", 501
-
-###
-# EXECUTIONS
-###
-
-
-# list all executions
-@app.route(route_base + "executions", methods=['GET'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def list_executions(model_storage, logger):
- """
- Return all executions
- """
- elist = model_storage.execution.list()
- outlist = []
- for execution in elist:
- outlist.append(
- {"execution_id": execution.id,
- "workflow_name": execution.workflow_name,
- "service_template_name": execution.service_template_name,
- "service_name": execution.service_name,
- "status": execution.status})
- return jsonify(outlist), 200
-
-
-# list executions for service
-@app.route(route_base + "services/<service_id>/executions", methods=['GET'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def list_service_executions(service_id, model_storage, logger):
- """
- Return all executions for specified service
- """
- service = model_storage.service.get(service_id)
- elist = model_storage.execution.list(filters=dict(service=service))
- outlist = []
- for execution in elist:
- outlist.append(
- {"execution_id": execution.id,
- "workflow_name": execution.workflow_name,
- "service_template_name": execution.service_template_name,
- "service_name": execution.service_name,
- "status": execution.status})
- return jsonify(outlist), 200
-
-
-# show execution
-@app.route(route_base + "executions/<execution_id>", methods=['GET'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def show_execution(execution_id, model_storage, logger):
- """
- Return details of specified execution
- """
- try:
- execution = model_storage.execution.get(execution_id)
- except BaseException:
- return "Execution {} not found".format(execution_id), 404
-
- return jsonify({"execution_id": execution_id,
- "service_name": execution.service_name,
- "service_template_name": execution.service_template_name,
- "workflow_name": execution.workflow_name,
- "status": execution.status}), 200
-
-# start execution
-
-
-# TODO allow executors other than default and dry to be used
-@app.route(
- route_base +
- "services/<service_id>/executions/<workflow_name>",
- methods=['POST'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_resource_storage
-@aria.pass_plugin_manager
-@aria.pass_logger
-def start_execution(
- service_id,
- workflow_name,
- model_storage,
- resource_storage,
- plugin_manager,
- logger):
- """
- Start an execution for the specified service
- """
- body = request.json or {}
- executor = DryExecutor(
- ) if 'executor' in body and body['executor'] == 'dry' else None
-
- inputs = body['inputs'] if 'inputs' in body else None
- task_max_attempts = (body['task_max_attempts']
- if 'task_max_attempts' in body else 30)
- task_retry_interval = (body['task_retry_interval']
- if 'task_retry_interval' in body else 30)
-
- runner = WorkflowRunner(model_storage, resource_storage, plugin_manager,
- service_id=service_id,
- workflow_name=workflow_name,
- inputs=inputs,
- executor=executor,
- task_max_attempts=task_max_attempts,
- task_retry_interval=task_retry_interval)
-
- service = model_storage.service.get(service_id)
- tname = '{}_{}_{}'.format(service.name, workflow_name, runner.execution_id)
- thread = threading.ExceptionThread(target=runner.execute,
- name=tname)
- thread.start()
- execution_state[str(runner.execution_id)] = [runner, thread]
- return jsonify({"id": runner.execution_id}), 202
-
-
-# resume execution
-@app.route(route_base + "executions/<execution_id>", methods=['POST'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_resource_storage
-@aria.pass_plugin_manager
-@aria.pass_logger
-def resume_execution(
- execution_id,
- model_storage,
- resource_storage,
- plugin_manager,
- logger):
- """
- Resume the specified execution
- """
- body = request.json or {}
- execution = model_storage.execution.get(execution_id)
- if execution.status != execution.status.CANCELLED:
- return "cancelled execution cannot be resumed", 400
- executor = DryExecutor(
- ) if 'executor' in body and body['executor'] == 'dry' else None
- retry_failed_tasks = body['retry_failed_tasks'] \
- if 'retry_failed_tasks' in body else False
-
- runner = WorkflowRunner(model_storage, resource_storage, plugin_manager,
- execution_id=execution_id,
- executor=executor,
- retry_failed_tasks=retry_failed_tasks)
-
- tname = '{}_{}_{}'.format(execution.service.name, execution.workflow_name,
- runner.execution_id)
- thread = threading.ExceptionThread(target=runner.execute,
- name=tname,
- daemon=True)
- thread.start()
- execution_state[str(runner.execution_id)] = [runner, thread]
- return jsonify({"id": runner.execution_id}), 202
-
-
-# cancel execution
-@app.route(route_base + "executions/<execution_id>", methods=['DELETE'])
-@auto.doc()
-@aria.pass_model_storage
-@aria.pass_logger
-def cancel_execution(execution_id, model_storage, logger):
- """
- Cancel the specified execution
- """
- logger.info("cancelling execution {}".format(execution_id))
- body = request.json or {}
-
- try:
- execution = model_storage.execution.get(execution_id)
- except BaseException:
- return "Execution {} not found".format(execution_id), 404
-
- if (not execution.status == execution.PENDING and
- not execution.status == execution.STARTED):
- return "Cancel ignored. Execution state = {}".format(
- execution.status), 200
-
- if execution_id not in execution_state:
- logger.error("id {} not found".format(execution_id))
- return "execution id {} not found".format(execution_id), 400
-
- einfo = execution_state[execution_id]
- runner = einfo[0]
- thread = einfo[1]
- timeout = 30 # seconds to wait for thread death
- if 'timeout' in body:
- timeout = body['timeout']
-
- runner.cancel()
- while thread.is_alive() and timeout > 0:
- thread.join(1)
- if not thread.is_alive():
- return "execution {} cancelled".format(execution_id), 200
- timeout = timeout - 1
- if timeout == 0:
- return "execution cancel timed out", 500
- return "execution {} cancelled".format(execution_id), 200
-
-
-if __name__ == "__main__":
- app.run(host='0.0.0.0', port=5000, threaded=True)
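For context, the endpoint removed above (PUT /api/0.1/templates/<template_name>) accepted a JSON body pointing at a CSAR URL, a plain-text service template, or a raw CSAR upload. A minimal client sketch for the JSON mode, assuming a hypothetical server on localhost:5000 and a placeholder CSAR URL (both illustrative only, not part of this patch):

# Hypothetical client for the removed aria-rest "install template" endpoint (JSON mode).
# Assumes the server listens on localhost:5000; the CSAR URL below is a placeholder.
import requests

base = "http://localhost:5000/api/0.1/"
body = {
    "service_template_path": "http://example.org/my-service.csar",   # required: URL of the CSAR
    "service_template_filename": "service-template.yaml",            # optional: template file inside the CSAR
}
resp = requests.put(base + "templates/my-template", json=body)       # sent as Content-Type: application/json
print(resp.status_code, resp.text)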
diff --git a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/templates/index.html b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/templates/index.html
deleted file mode 100644
index e9a5e2ea00..0000000000
--- a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/templates/index.html
+++ /dev/null
@@ -1,23 +0,0 @@
-<!--
-#
-# ============LICENSE_START===================================================
-# Copyright (c) 2017 Cloudify.co. All rights reserved.
-# ===================================================================
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-# ============LICENSE_END====================================================
-#
--->
-
-<body>
-<h1>Not Implemented</h1>
-</body>
diff --git a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/util.py b/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/util.py
deleted file mode 100644
index 2310d7eddf..0000000000
--- a/aria/aria-rest-server/src/main/python/aria-rest/aria_rest/util.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#
-# ============LICENSE_START===================================================
-# Copyright (c) 2017 Cloudify.co. All rights reserved.
-# ===================================================================
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-# ============LICENSE_END====================================================
-#
-
-
-import threading
-
-def make_template_name( user, template_name ):
- return "{}.{}".format(user,template_name)
-
-
-class SafeDict(dict):
- def __init__(self, *args):
- self._lockobj = threading.Lock()
- dict.__init__(self, args)
-
- def __getitem__(self, key):
- try:
- self._lockobj.acquire()
- val = dict.__getitem__(self, key)
- except:
- raise
- finally:
- self._lockobj.release()
-
- def __setitem__(self, key, value):
- try:
- self._lockobj.acquire()
- dict.__setitem__(self, key, value)
- except:
- raise
- finally:
- self._lockobj.release()
-
diff --git a/aria/aria-rest-server/src/main/python/aria-rest/rest.py b/aria/aria-rest-server/src/main/python/aria-rest/rest.py
deleted file mode 100644
index 6669ac39ee..0000000000
--- a/aria/aria-rest-server/src/main/python/aria-rest/rest.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# ============LICENSE_START===================================================
-# Copyright (c) 2017 Cloudify.co. All rights reserved.
-# ===================================================================
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-# ============LICENSE_END====================================================
-#
-
-from flask import Flask, render_template
-from aria.exceptions import AriaException
-
-version_id = "0.1"
-route_base = "/api/" + version_id + "/"
-app = Flask("onap-aria-rest")
-
-@app.route("/")
-def index():
- return render_template('index.html')
-
-
-@app.route(route_base + "templates/", methods = ['GET'])
-def list_templates():
-
-@app.route(route_base + "templates/<template_id>", methods = ['POST'])
-def install_template( template_id ):
-
- # GET CSAR FROM SDC
-
- # DEPLOY CSAR
-
- # UPDATE A&AI?
-
- return "template {} instantiated"
-
-@app.route(route_base + "templates/<template_id>", methods = ['DELETE'])
-def delete_template( template_id ):
-
- # RUN UNINSTALL
-
- # DELETE TEMPLATE
-
- # UPDATE A&AI?
-
- return "template {} deleted"
-
-if __name__ == "__main__":
- app.run()
diff --git a/aria/aria-rest-server/src/main/python/aria-rest/setup.py b/aria/aria-rest-server/src/main/python/aria-rest/setup.py
deleted file mode 100644
index 84e9a19560..0000000000
--- a/aria/aria-rest-server/src/main/python/aria-rest/setup.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# ============LICENSE_START===================================================
-# Copyright (c) 2017 Cloudify.co. All rights reserved.
-# ===================================================================
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy
-# of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-# ============LICENSE_END====================================================
-#
-
-
-from setuptools import setup
-
-setup(
- zip_safe=True,
- name='aria-rest',
- version='0.1',
- author='dewayne',
- author_email='dewayne@cloudify.co',
- packages=[
- 'aria_rest'
- ],
- entry_points = {
- 'console_scripts' : ['aria-rest=aria_rest.rest:main']
- },
- license='LICENSE',
- description='Aria REST API for ONAP',
- install_requires=[
- 'distribute',
- 'Flask==0.12.2',
- 'flask-autodoc==0.1.2',
- 'apache-ariatosca==0.1.1'
- ]
-)
diff --git a/aria/aria-rest-server/src/main/python/aria-rest/templates/index.html b/aria/aria-rest-server/src/main/python/aria-rest/templates/index.html
deleted file mode 100644
index 6d74cfc0f8..0000000000
--- a/aria/aria-rest-server/src/main/python/aria-rest/templates/index.html
+++ /dev/null
@@ -1,3 +0,0 @@
-<body>
-<h1>Not Implemented</h1>
-</body>
diff --git a/aria/multivim-plugin/.gitignore b/aria/multivim-plugin/.gitignore
deleted file mode 100644
index ce50313b79..0000000000
--- a/aria/multivim-plugin/.gitignore
+++ /dev/null
@@ -1,63 +0,0 @@
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-env/
-bin/
-build/
-develop-eggs/
-dist/
-eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-*.egg-info/
-.installed.cfg
-*.egg
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.coverage
-.cache
-nosetests.xml
-coverage.xml
-
-# Translations
-*.mo
-
-# Mr Developer
-.mr.developer.cfg
-.project
-.pydevproject
-
-# Rope
-.ropeproject
-
-# Django stuff:
-*.log
-*.pot
-
-# Sphinx documentation
-docs/_build/
-
-*.iml
-
-*COMMIT_MSG
-
-# QuickBuild
-.qbcache/
-
-.idea/
-
diff --git a/aria/multivim-plugin/.travis.yml b/aria/multivim-plugin/.travis.yml
deleted file mode 100644
index 8653f2f76a..0000000000
--- a/aria/multivim-plugin/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: python
-sudo: false
-python:
- - "2.7"
-env:
- # - TOX_ENV=docs
- - TOX_ENV=flake8
- - TOX_ENV=py27
-# TODO: add coveralls support
-install:
- - pip install tox
- # - pip install coveralls
-script:
- - tox -e $TOX_ENV
-# after_success:
-# coveralls
-notifications:
- flowdock: 1f4ec6febcf1ac9b35ae6c1f0049471f
diff --git a/aria/multivim-plugin/build.py b/aria/multivim-plugin/build.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/aria/multivim-plugin/build.py
+++ /dev/null
diff --git a/aria/multivim-plugin/pom.xml b/aria/multivim-plugin/pom.xml
deleted file mode 100644
index 87326ff01c..0000000000
--- a/aria/multivim-plugin/pom.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <packaging>pom</packaging>
- <groupId>org.onap.so</groupId>
- <artifactId>multivim-plugin</artifactId>
- <name>multivim-plugin</name>
- <description>ARIA MultiVIM plugin</description>
- <parent>
- <groupId>org.onap.so</groupId>
- <artifactId>aria</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <relativePath>../pom.xml</relativePath>
- </parent>
-
- <properties>
- <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <python_version>2.7</python_version>
- <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <wheel.name>${project.name}-${project.version}-py2-none-any.whl</wheel.name>
- <python.sourceDirectory>${project.basedir}/src/main/python/multivim-plugin</python.sourceDirectory>
- <sonar.exclusions>**/*.py</sonar.exclusions>
- <onap.nexus.pypiserver.baseurl>http://192.168.33.1:8081/repository/pypi-internal/</onap.nexus.pypiserver.baseurl>
- <onap.nexus.pypiserver.serverid>ecomp-snapshots</onap.nexus.pypiserver.serverid>
- </properties>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>build-helper-maven-plugin</artifactId>
- <version>3.0.0</version>
- <executions>
- <execution>
- <id>write-python-version</id>
- <goals>
- <goal>regex-property</goal>
- </goals>
- <phase>initialize</phase>
- <configuration>
- <name>python_version</name>
- <regex>-SNAPSHOT</regex>
- <value>${project.version}</value>
- <replacement>\.dev0</replacement>
- <failIfNoMatch>false</failIfNoMatch>
- </configuration>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>exec-maven-plugin</artifactId>
- <version>1.6.0</version>
- <executions>
- <execution>
- <id>package</id>
- <phase>package</phase>
- <goals><goal>exec</goal></goals>
- <configuration>
- <executable>python</executable>
- <arguments>
- <argument>${project.basedir}/build.py</argument>
- </arguments>
- <environmentVariables>
- <MVN_PHASE>package</MVN_PHASE>
- <WHEEL_NAME>${wheel.name}</WHEEL_NAME>
- <INPUT_DIR>${python.sourceDirectory}</INPUT_DIR>
- <OUTPUT_DIR>${project.build.directory}</OUTPUT_DIR>
- </environmentVariables>
- </configuration>
- </execution>
- <execution>
- <id>deploy</id>
- <phase>deploy</phase>
- <goals><goal>exec</goal></goals>
- <configuration>
- <executable>python</executable>
- <arguments>
- <argument>${project.basedir}/build.py</argument>
- </arguments>
- <environmentVariables>
- <MVN_PHASE>deploy</MVN_PHASE>
- <PROJECT_VERSION>${project.version}</PROJECT_VERSION>
- <DOCKERREGISTRY_SNAPSHOT>${onap.nexus.dockerregistry.snapshot}</DOCKERREGISTRY_SNAPSHOT>
- <DOCKERREGISTRY_RELEASE>${onap.nexus.dockerregistry.release}</DOCKERREGISTRY_RELEASE>
- <PYPI_SERVER_BASEURL>${onap.nexus.pypiserver.baseurl}</PYPI_SERVER_BASEURL>
- <PYPI_SERVERID>${onap.nexus.pypiserver.serverid}</PYPI_SERVERID>
- <WHEEL_PATH>${project.build.directory}/${wheel.name}</WHEEL_PATH>
- </environmentVariables>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
-</project>
-
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/CHANGELOG.txt b/aria/multivim-plugin/src/main/python/multivim-plugin/CHANGELOG.txt
deleted file mode 100644
index da9875a5bc..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/CHANGELOG.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-2.2.0:
- - Fix duplicated mapping key in plugin.yaml.
- - Create Server with security groups from instance relationships. This prevents a window of time when a server can
- be unsecured. (OPENSTACK-38)
- - Fix floating IP detach issue. (OPENSTACK-12)
- - Allow openstack_config as runtime property. (OPENSTACK-112)
- - Fix key creation when folders don't exist. (OPENSTACK-7)
-2.0.1:
- - Don't overwrite server['image'] when server is booted from volume
- - Fix loading auth_url from environment (OPENSTACK-101)
- - Raise an error if server is not attached to a network. Previously an IndexError would be raised.
- - Make sure security_group is removed if a later step (rule creation) fails (OPENSTACK-106)
- - Fix attempt to access `volume.display_name` (is now .name) (OPENSTACK-108)
- - Correctly handle nova_url and neutron_url in openstack_configuration (these are deprecated) (OPENSTACK-109)
-2.0:
- - Don't require a Server image to be specified if a boot_volume is attached
- - Add support for keystone auth v3. auth_url setting must now include version
- - Upgraded openstack library dependencies
- - Use availability_zone from connected boot_volume if Server doesn't specify
- - Embed full docs in plugin repo. Now using sphinxify sphinx extension
-1.5:
- - Create project, assign existing users with roles and customize quotas.
- - Create image from file (local workflow only) or url.
- - Add conditional creation to all resources. Create a resource only if it doesn't already exist. Previously, could
- either use an existing resource, or create it.
- - Boot server from volume. Support boot from block storage and not only from image like in previous versions.
- - Fix connect port to security group race-condition.
- - Get mac address from port after creation.
- - Raise error also when external network is missing in floating ip creation. Previously, an error was raised only
- when floating network id or name was missing.
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/LICENSE b/aria/multivim-plugin/src/main/python/multivim-plugin/LICENSE
deleted file mode 100644
index e06d208186..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/Makefile b/aria/multivim-plugin/src/main/python/multivim-plugin/Makefile
deleted file mode 100644
index cfb7416fa7..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/Makefile
+++ /dev/null
@@ -1,39 +0,0 @@
-.PHONY: release install files test docs prepare publish
-
-all:
- @echo "make release - prepares a release and publishes it"
- @echo "make dev - prepares a development environment"
- @echo "make install - install on local system"
- @echo "make files - update changelog and todo files"
- @echo "make test - run tox"
- @echo "make docs - build docs"
- @echo "prepare - prepare module for release (CURRENTLY IRRELEVANT)"
- @echo "make publish - upload to pypi"
-
-release: test docs publish
-
-dev:
- pip install -rdev-requirements.txt
- python setup.py develop
-
-install:
- python setup.py install
-
-files:
- grep '# TODO' -rn * --exclude-dir=docs --exclude-dir=build --exclude=TODO.md | sed 's/: \+#/: # /g;s/:#/: # /g' | sed -e 's/^/- /' | grep -v Makefile > TODO.md
- git log --oneline --decorate --color > CHANGELOG
-
-test:
- pip install tox
- tox
-
-docs:
- pip install sphinx sphinx-rtd-theme
- cd docs && make html
- pandoc README.md -f markdown -t rst -s -o README.rst
-
-prepare:
- python scripts/make-release.py
-
-publish:
- python setup.py sdist upload
\ No newline at end of file
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/README.md b/aria/multivim-plugin/src/main/python/multivim-plugin/README.md
deleted file mode 100644
index 3b5b8df721..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-cloudify-openstack-plugin
-=========================
-
-[![Circle CI](https://circleci.com/gh/cloudify-cosmo/cloudify-openstack-plugin/tree/master.svg?style=shield)](https://circleci.com/gh/cloudify-cosmo/cloudify-openstack-plugin/tree/master)
-[![Build Status](https://travis-ci.org/cloudify-cosmo/cloudify-openstack-plugin.svg?branch=master)](https://travis-ci.org/cloudify-cosmo/cloudify-openstack-plugin)
-
-Cloudify OpenStack Plugin
-
-## Usage
-
-See [Openstack Plugin](http://docs.getcloudify.org/latest/plugins/openstack/)
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/README.rst b/aria/multivim-plugin/src/main/python/multivim-plugin/README.rst
deleted file mode 100644
index eaa0de6eaf..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/README.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-cloudify-openstack-plugin
-=========================
-
-Cloudify OpenStack Plugin
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/__init__.py
deleted file mode 100644
index a9dfcc4473..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/tests/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/tests/__init__.py
deleted file mode 100644
index a9dfcc4473..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/tests/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/tests/test_volume.py b/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/tests/test_volume.py
deleted file mode 100644
index 0ee85bc334..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/tests/test_volume.py
+++ /dev/null
@@ -1,342 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import mock
-import unittest
-
-from cloudify import mocks as cfy_mocks
-from cloudify import exceptions as cfy_exc
-from cloudify.state import current_ctx
-from cinder_plugin import volume
-from nova_plugin import server
-from openstack_plugin_common import (OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY)
-
-
-class TestCinderVolume(unittest.TestCase):
-
- def _mock(self, **kwargs):
- ctx = cfy_mocks.MockCloudifyContext(**kwargs)
- current_ctx.set(ctx)
- return ctx
-
- def tearDown(self):
- current_ctx.clear()
-
- def test_create_new(self):
- volume_name = 'fake volume name'
- volume_description = 'fake volume'
- volume_id = '00000000-0000-0000-0000-000000000000'
- volume_size = 10
-
- volume_properties = {
- 'volume': {
- 'size': volume_size,
- 'description': volume_description
- },
- 'use_external_resource': False,
- 'device_name': '/dev/fake',
- 'resource_id': volume_name,
- }
-
- creating_volume_m = mock.Mock()
- creating_volume_m.id = volume_id
- creating_volume_m.status = volume.VOLUME_STATUS_CREATING
- available_volume_m = mock.Mock()
- available_volume_m.id = volume_id
- available_volume_m.status = volume.VOLUME_STATUS_AVAILABLE
- cinder_client_m = mock.Mock()
- cinder_client_m.volumes = mock.Mock()
- cinder_client_m.volumes.create = mock.Mock(
- return_value=creating_volume_m)
- cinder_client_m.volumes.get = mock.Mock(
- return_value=available_volume_m)
- ctx_m = self._mock(node_id='a', properties=volume_properties)
-
- volume.create(cinder_client=cinder_client_m, args={}, ctx=ctx_m,
- status_attempts=10, status_timeout=2)
-
- cinder_client_m.volumes.create.assert_called_once_with(
- size=volume_size,
- name=volume_name,
- description=volume_description)
- cinder_client_m.volumes.get.assert_called_once_with(volume_id)
- self.assertEqual(
- volume_id,
- ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY])
- self.assertEqual(
- volume.VOLUME_OPENSTACK_TYPE,
- ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY])
-
- def test_create_use_existing(self):
- volume_id = '00000000-0000-0000-0000-000000000000'
-
- volume_properties = {
- 'use_external_resource': True,
- 'device_name': '/dev/fake',
- 'resource_id': volume_id,
- }
- existing_volume_m = mock.Mock()
- existing_volume_m.id = volume_id
- existing_volume_m.status = volume.VOLUME_STATUS_AVAILABLE
- cinder_client_m = mock.Mock()
- cinder_client_m.volumes = mock.Mock()
- cinder_client_m.volumes.create = mock.Mock()
- cinder_client_m.cosmo_get_if_exists = mock.Mock(
- return_value=existing_volume_m)
- cinder_client_m.get_id_from_resource = mock.Mock(
- return_value=volume_id)
- ctx_m = self._mock(node_id='a', properties=volume_properties)
-
- volume.create(cinder_client=cinder_client_m, args={}, ctx=ctx_m,
- status_attempts=10, status_timeout=2)
-
- self.assertFalse(cinder_client_m.volumes.create.called)
- self.assertEqual(
- volume_id,
- ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY])
- self.assertEqual(
- volume.VOLUME_OPENSTACK_TYPE,
- ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY])
-
- def test_delete(self):
- volume_id = '00000000-0000-0000-0000-000000000000'
- volume_name = 'test-volume'
-
- volume_properties = {
- 'use_external_resource': False,
- }
-
- cinder_client_m = mock.Mock()
- cinder_client_m.cosmo_delete_resource = mock.Mock()
-
- ctx_m = self._mock(node_id='a', properties=volume_properties)
- ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = volume_id
- ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
- volume.VOLUME_OPENSTACK_TYPE
- ctx_m.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \
- volume_name
-
- volume.delete(cinder_client=cinder_client_m, ctx=ctx_m)
-
- cinder_client_m.cosmo_delete_resource.assert_called_once_with(
- volume.VOLUME_OPENSTACK_TYPE, volume_id)
- self.assertTrue(
- OPENSTACK_ID_PROPERTY not in ctx_m.instance.runtime_properties)
- self.assertTrue(OPENSTACK_TYPE_PROPERTY
- not in ctx_m.instance.runtime_properties)
- self.assertTrue(OPENSTACK_NAME_PROPERTY
- not in ctx_m.instance.runtime_properties)
-
- @mock.patch('openstack_plugin_common.NovaClientWithSugar')
- @mock.patch('openstack_plugin_common.CinderClientWithSugar')
- @mock.patch.object(volume, 'wait_until_status', return_value=(None, True))
- def test_attach(self, wait_until_status_m, cinder_m, nova_m):
- volume_id = '00000000-0000-0000-0000-000000000000'
- server_id = '11111111-1111-1111-1111-111111111111'
- device_name = '/dev/fake'
-
- volume_ctx = cfy_mocks.MockContext({
- 'node': cfy_mocks.MockContext({
- 'properties': {volume.DEVICE_NAME_PROPERTY: device_name}
- }),
- 'instance': cfy_mocks.MockContext({
- 'runtime_properties': {
- OPENSTACK_ID_PROPERTY: volume_id,
- }
- })
- })
- server_ctx = cfy_mocks.MockContext({
- 'node': cfy_mocks.MockContext({
- 'properties': {}
- }),
- 'instance': cfy_mocks.MockContext({
- 'runtime_properties': {
- server.OPENSTACK_ID_PROPERTY: server_id
- }
- })
- })
-
- ctx_m = self._mock(node_id='a',
- target=server_ctx,
- source=volume_ctx)
-
- nova_instance = nova_m.return_value
- cinder_instance = cinder_m.return_value
-
- server.attach_volume(ctx=ctx_m, status_attempts=10,
- status_timeout=2)
-
- nova_instance.volumes.create_server_volume.assert_called_once_with(
- server_id, volume_id, device_name)
- wait_until_status_m.assert_called_once_with(
- cinder_client=cinder_instance,
- volume_id=volume_id,
- status=volume.VOLUME_STATUS_IN_USE,
- num_tries=10,
- timeout=2,
- )
-
- @mock.patch('openstack_plugin_common.NovaClientWithSugar')
- @mock.patch('openstack_plugin_common.CinderClientWithSugar')
- def _test_cleanup__after_attach_fails(
- self, expected_err_cls, expect_cleanup,
- wait_until_status_m, cinder_m, nova_m):
- volume_id = '00000000-0000-0000-0000-000000000000'
- server_id = '11111111-1111-1111-1111-111111111111'
- attachment_id = '22222222-2222-2222-2222-222222222222'
- device_name = '/dev/fake'
-
- attachment = {'id': attachment_id,
- 'server_id': server_id,
- 'volume_id': volume_id}
-
- volume_ctx = cfy_mocks.MockContext({
- 'node': cfy_mocks.MockContext({
- 'properties': {volume.DEVICE_NAME_PROPERTY: device_name}
- }),
- 'instance': cfy_mocks.MockContext({
- 'runtime_properties': {
- OPENSTACK_ID_PROPERTY: volume_id,
- }
- })
- })
- server_ctx = cfy_mocks.MockContext({
- 'node': cfy_mocks.MockContext({
- 'properties': {}
- }),
- 'instance': cfy_mocks.MockContext({
- 'runtime_properties': {
- server.OPENSTACK_ID_PROPERTY: server_id
- }
- })
- })
-
- ctx_m = self._mock(node_id='a',
- target=server_ctx,
- source=volume_ctx)
-
- attached_volume = mock.Mock(id=volume_id,
- status=volume.VOLUME_STATUS_IN_USE,
- attachments=[attachment])
- nova_instance = nova_m.return_value
- cinder_instance = cinder_m.return_value
- cinder_instance.volumes.get.return_value = attached_volume
-
- with self.assertRaises(expected_err_cls):
- server.attach_volume(ctx=ctx_m, status_attempts=10,
- status_timeout=2)
-
- nova_instance.volumes.create_server_volume.assert_called_once_with(
- server_id, volume_id, device_name)
- volume.wait_until_status.assert_any_call(
- cinder_client=cinder_instance,
- volume_id=volume_id,
- status=volume.VOLUME_STATUS_IN_USE,
- num_tries=10,
- timeout=2,
- )
- if expect_cleanup:
- nova_instance.volumes.delete_server_volume.assert_called_once_with(
- server_id, attachment_id)
- self.assertEqual(2, volume.wait_until_status.call_count)
- volume.wait_until_status.assert_called_with(
- cinder_client=cinder_instance,
- volume_id=volume_id,
- status=volume.VOLUME_STATUS_AVAILABLE,
- num_tries=10,
- timeout=2)
-
- def test_cleanup_after_waituntilstatus_throws_recoverable_error(self):
- err = cfy_exc.RecoverableError('Some recoverable error')
- with mock.patch.object(volume, 'wait_until_status',
- side_effect=[err, (None, True)]) as wait_mock:
- self._test_cleanup__after_attach_fails(type(err), True, wait_mock)
-
- def test_cleanup_after_waituntilstatus_throws_any_not_nonrecov_error(self):
- class ArbitraryNonRecoverableException(Exception):
- pass
- err = ArbitraryNonRecoverableException('An exception')
- with mock.patch.object(volume, 'wait_until_status',
- side_effect=[err, (None, True)]) as wait_mock:
- self._test_cleanup__after_attach_fails(type(err), True, wait_mock)
-
- def test_cleanup_after_waituntilstatus_lets_nonrecov_errors_pass(self):
- err = cfy_exc.NonRecoverableError('Some non recoverable error')
- with mock.patch.object(volume, 'wait_until_status',
- side_effect=[err, (None, True)]) as wait_mock:
- self._test_cleanup__after_attach_fails(type(err), False, wait_mock)
-
- @mock.patch.object(volume, 'wait_until_status', return_value=(None, False))
- def test_cleanup_after_waituntilstatus_times_out(self, wait_mock):
- self._test_cleanup__after_attach_fails(cfy_exc.RecoverableError, True,
- wait_mock)
-
- @mock.patch('openstack_plugin_common.NovaClientWithSugar')
- @mock.patch('openstack_plugin_common.CinderClientWithSugar')
- @mock.patch.object(volume, 'wait_until_status', return_value=(None, True))
- def test_detach(self, wait_until_status_m, cinder_m, nova_m):
- volume_id = '00000000-0000-0000-0000-000000000000'
- server_id = '11111111-1111-1111-1111-111111111111'
- attachment_id = '22222222-2222-2222-2222-222222222222'
-
- attachment = {'id': attachment_id,
- 'server_id': server_id,
- 'volume_id': volume_id}
-
- volume_ctx = cfy_mocks.MockContext({
- 'node': cfy_mocks.MockContext({
- 'properties': {}
- }),
- 'instance': cfy_mocks.MockContext({
- 'runtime_properties': {
- OPENSTACK_ID_PROPERTY: volume_id,
- }
- })
- })
- server_ctx = cfy_mocks.MockContext({
- 'node': cfy_mocks.MockContext({
- 'properties': {}
- }),
- 'instance': cfy_mocks.MockContext({
- 'runtime_properties': {
- server.OPENSTACK_ID_PROPERTY: server_id
- }
- })
- })
-
- ctx_m = self._mock(node_id='a',
- target=server_ctx,
- source=volume_ctx)
-
- attached_volume = mock.Mock(id=volume_id,
- status=volume.VOLUME_STATUS_IN_USE,
- attachments=[attachment])
- nova_instance = nova_m.return_value
- cinder_instance = cinder_m.return_value
- cinder_instance.volumes.get.return_value = attached_volume
-
- server.detach_volume(ctx=ctx_m, status_attempts=10, status_timeout=2)
-
- nova_instance.volumes.delete_server_volume.assert_called_once_with(
- server_id, attachment_id)
- volume.wait_until_status.assert_called_once_with(
- cinder_client=cinder_instance,
- volume_id=volume_id,
- status=volume.VOLUME_STATUS_AVAILABLE,
- num_tries=10,
- timeout=2,
- )
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/volume.py b/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/volume.py
deleted file mode 100644
index 168681b943..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/cinder_plugin/volume.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import time
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from cloudify import exceptions as cfy_exc
-
-from openstack_plugin_common import (delete_resource_and_runtime_properties,
- with_cinder_client,
- get_resource_id,
- transform_resource_name,
- use_external_resource,
- validate_resource,
- COMMON_RUNTIME_PROPERTIES_KEYS,
- OPENSTACK_AZ_PROPERTY,
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY)
-from glance_plugin.image import handle_image_from_relationship
-
-VOLUME_STATUS_CREATING = 'creating'
-VOLUME_STATUS_DELETING = 'deleting'
-VOLUME_STATUS_AVAILABLE = 'available'
-VOLUME_STATUS_IN_USE = 'in-use'
-VOLUME_STATUS_ERROR = 'error'
-VOLUME_STATUS_ERROR_DELETING = 'error_deleting'
-VOLUME_ERROR_STATUSES = (VOLUME_STATUS_ERROR, VOLUME_STATUS_ERROR_DELETING)
-
-# Note: The 'device_name' property should actually be a property of the
-# relationship between a server and a volume; It'll move to that
-# relationship type once relationship properties are better supported.
-DEVICE_NAME_PROPERTY = 'device_name'
-
-VOLUME_OPENSTACK_TYPE = 'volume'
-
-RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
-
-
-@operation
-@with_cinder_client
-def create(cinder_client, status_attempts, status_timeout, args, **kwargs):
-
- if use_external_resource(ctx, cinder_client, VOLUME_OPENSTACK_TYPE,
- 'name'):
- return
-
- name = get_resource_id(ctx, VOLUME_OPENSTACK_TYPE)
- volume_dict = {'name': name}
- volume_dict.update(ctx.node.properties['volume'], **args)
- handle_image_from_relationship(volume_dict, 'imageRef', ctx)
- volume_dict['name'] = transform_resource_name(
- ctx, volume_dict['name'])
-
- v = cinder_client.volumes.create(**volume_dict)
-
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = v.id
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
- VOLUME_OPENSTACK_TYPE
- ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \
- volume_dict['name']
- wait_until_status(cinder_client=cinder_client,
- volume_id=v.id,
- status=VOLUME_STATUS_AVAILABLE,
- num_tries=status_attempts,
- timeout=status_timeout,
- )
- ctx.instance.runtime_properties[OPENSTACK_AZ_PROPERTY] = \
- v.availability_zone
-
-
-@operation
-@with_cinder_client
-def delete(cinder_client, **kwargs):
- delete_resource_and_runtime_properties(ctx, cinder_client,
- RUNTIME_PROPERTIES_KEYS)
-
-
-@with_cinder_client
-def wait_until_status(cinder_client, volume_id, status, num_tries,
- timeout):
- for _ in range(num_tries):
- volume = cinder_client.volumes.get(volume_id)
-
- if volume.status in VOLUME_ERROR_STATUSES:
- raise cfy_exc.NonRecoverableError(
- "Volume {0} is in error state".format(volume_id))
-
- if volume.status == status:
- return volume, True
- time.sleep(timeout)
-
- ctx.logger.warning("Volume {0} current state: '{1}', "
- "expected state: '{2}'".format(volume_id,
- volume.status,
- status))
- return volume, False
-
-
-@with_cinder_client
-def get_attachment(cinder_client, volume_id, server_id):
- volume = cinder_client.volumes.get(volume_id)
- for attachment in volume.attachments:
- if attachment['server_id'] == server_id:
- return attachment
-
-
-@operation
-@with_cinder_client
-def creation_validation(cinder_client, **kwargs):
- validate_resource(ctx, cinder_client, VOLUME_OPENSTACK_TYPE,
- 'name')
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/circle.yml b/aria/multivim-plugin/src/main/python/multivim-plugin/circle.yml
deleted file mode 100644
index 2a2c66e88c..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/circle.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-machine:
- python:
- version: 2.7.9
-
-checkout:
- post:
- - >
- if [ -n "$CI_PULL_REQUEST" ]; then
- PR_ID=${CI_PULL_REQUEST##*/}
- git fetch origin +refs/pull/$PR_ID/merge:
- git checkout -qf FETCH_HEAD
- fi
-
-dependencies:
- override:
- - pip install --upgrade tox virtualenv
-
-test:
- override:
- # - tox -e docs
- - tox -e flake8
- - tox -e py27
-
-# Docs artifacts
-general:
- artifacts:
- - .tox/docs/tmp/html
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/dev-requirements.txt b/aria/multivim-plugin/src/main/python/multivim-plugin/dev-requirements.txt
deleted file mode 100644
index fcb6a806cd..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/dev-requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-https://github.com/cloudify-cosmo/cloudify-dsl-parser/archive/3.4.1.zip
-https://github.com/cloudify-cosmo/cloudify-rest-client/archive/3.4.1.zip
-https://github.com/cloudify-cosmo/cloudify-plugins-common/archive/3.4.1.zip
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/__init__.py
deleted file mode 100644
index 809f033a55..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#########
-# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/image.py b/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/image.py
deleted file mode 100644
index a8d5b203f4..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/image.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#########
-# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-import httplib
-from urlparse import urlparse
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from cloudify.exceptions import NonRecoverableError
-
-from openstack_plugin_common import (
- with_glance_client,
- get_resource_id,
- use_external_resource,
- get_openstack_ids_of_connected_nodes_by_openstack_type,
- delete_resource_and_runtime_properties,
- validate_resource,
- COMMON_RUNTIME_PROPERTIES_KEYS,
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY)
-
-
-IMAGE_OPENSTACK_TYPE = 'image'
-IMAGE_STATUS_ACTIVE = 'active'
-
-RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
-REQUIRED_PROPERTIES = ['container_format', 'disk_format']
-
-
-@operation
-@with_glance_client
-def create(glance_client, **kwargs):
- if use_external_resource(ctx, glance_client, IMAGE_OPENSTACK_TYPE):
- return
-
- img_dict = {
- 'name': get_resource_id(ctx, IMAGE_OPENSTACK_TYPE)
- }
- _validate_image_dictionary()
- img_properties = ctx.node.properties['image']
- img_dict.update({key: value for key, value in img_properties.iteritems()
- if key != 'data'})
- img = glance_client.images.create(**img_dict)
- img_path = img_properties.get('data', '')
- img_url = ctx.node.properties.get('image_url')
- try:
- _validate_image()
- if img_path:
- with open(img_path, 'rb') as image_file:
- glance_client.images.upload(
- image_id=img.id,
- image_data=image_file)
- elif img_url:
- img = glance_client.images.add_location(img.id, img_url, {})
-
- except:
- _remove_protected(glance_client)
- glance_client.images.delete(image_id=img.id)
- raise
-
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = img.id
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
- IMAGE_OPENSTACK_TYPE
- ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = img.name
-
-
-def _get_image_by_ctx(glance_client, ctx):
- return glance_client.images.get(
- image_id=ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY])
-
-
-@operation
-@with_glance_client
-def start(glance_client, start_retry_interval, **kwargs):
- img = _get_image_by_ctx(glance_client, ctx)
- if img.status != IMAGE_STATUS_ACTIVE:
- return ctx.operation.retry(
- message='Waiting for image to get uploaded',
- retry_after=start_retry_interval)
-
-
-@operation
-@with_glance_client
-def delete(glance_client, **kwargs):
- _remove_protected(glance_client)
- delete_resource_and_runtime_properties(ctx, glance_client,
- RUNTIME_PROPERTIES_KEYS)
-
-
-@operation
-@with_glance_client
-def creation_validation(glance_client, **kwargs):
- validate_resource(ctx, glance_client, IMAGE_OPENSTACK_TYPE)
- _validate_image_dictionary()
- _validate_image()
-
-
-def _validate_image_dictionary():
- img = ctx.node.properties['image']
- missing = ''
- try:
- for prop in REQUIRED_PROPERTIES:
- if prop not in img:
- missing += '{0} '.format(prop)
- except TypeError:
- missing = ' '.join(REQUIRED_PROPERTIES)
- if missing:
- raise NonRecoverableError('Required properties are missing: {'
- '0}. Please update your image '
- 'dictionary.'.format(missing))
-
-
-def _validate_image():
- img = ctx.node.properties['image']
- img_path = img.get('data')
- img_url = ctx.node.properties.get('image_url')
- if not img_url and not img_path:
- raise NonRecoverableError('Neither image url nor image path was '
- 'provided')
- if img_url and img_path:
- raise NonRecoverableError('Multiple image sources provided')
- if img_url:
- _check_url(img_url)
- if img_path:
- _check_path()
-
-
-def _check_url(url):
- p = urlparse(url)
- conn = httplib.HTTPConnection(p.netloc)
- conn.request('HEAD', p.path)
- resp = conn.getresponse()
- if resp.status >= 400:
- raise NonRecoverableError('Invalid image URL')
-
-
-def _check_path():
- img = ctx.node.properties['image']
- img_path = img.get('data')
- try:
- with open(img_path, 'rb'):
- pass
- except TypeError:
- if not img.get('url'):
- raise NonRecoverableError('No path or url provided')
- except IOError:
- raise NonRecoverableError(
- 'Unable to open image file with path: "{}"'.format(img_path))
-
-
-def _remove_protected(glance_client):
- if use_external_resource(ctx, glance_client, IMAGE_OPENSTACK_TYPE):
- return
-
- is_protected = ctx.node.properties['image'].get('protected', False)
- if is_protected:
- img_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- glance_client.images.update(img_id, protected=False)
-
-
-def handle_image_from_relationship(obj_dict, property_name_to_put, ctx):
- images = get_openstack_ids_of_connected_nodes_by_openstack_type(
- ctx, IMAGE_OPENSTACK_TYPE)
- if images:
- obj_dict.update({property_name_to_put: images[0]})
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/tests/resources/test-image-start.yaml b/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/tests/resources/test-image-start.yaml
deleted file mode 100644
index 12c9aa79b7..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/tests/resources/test-image-start.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-
-tosca_definitions_version: cloudify_dsl_1_3
-
-imports:
- - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
- - plugin.yaml
-
-inputs:
- use_password:
- type: boolean
- default: false
-
-node_templates:
- image:
- type: cloudify.openstack.nodes.Image
- properties:
- image:
- disk_format: test_format
- container_format: test_format
- data: test_path
- openstack_config:
- username: aaa
- password: aaa
- tenant_name: aaa
- auth_url: aaa
- interfaces:
- cloudify.interfaces.lifecycle:
- start:
- inputs:
- start_retry_interval: 1
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/tests/test.py b/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/tests/test.py
deleted file mode 100644
index 4a88cba4e7..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/glance_plugin/tests/test.py
+++ /dev/null
@@ -1,148 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import mock
-import os
-import tempfile
-import unittest
-
-import glance_plugin
-from glance_plugin import image
-
-from cloudify.mocks import MockCloudifyContext
-from cloudify.test_utils import workflow_test
-from cloudify.exceptions import NonRecoverableError
-
-
-def ctx_mock(image_dict):
- return MockCloudifyContext(
- node_id='d',
- properties=image_dict)
-
-
-class TestCheckImage(unittest.TestCase):
-
- @mock.patch('glance_plugin.image.ctx',
- ctx_mock({'image': {}}))
- def test_check_image_no_file_no_url(self):
- # Test if it throws exception no file & no url
- self.assertRaises(NonRecoverableError,
- image._validate_image)
-
- @mock.patch('glance_plugin.image.ctx',
- ctx_mock({'image_url': 'test-url', 'image': {'data': '.'}}))
- def test_check_image_and_url(self):
- # Test if it throws exception file & url
- self.assertRaises(NonRecoverableError,
- image._validate_image)
-
- @mock.patch('glance_plugin.image.ctx',
- ctx_mock({'image_url': 'test-url', 'image': {}}))
- def test_check_image_url(self):
- # test if it passes no file & url
- http_connection_mock = mock.MagicMock()
- http_connection_mock.return_value.getresponse.return_value.status = 200
- with mock.patch('httplib.HTTPConnection', http_connection_mock):
- glance_plugin.image._validate_image()
-
- def test_check_image_file(self):
- # test if it passes file & no url
- image_file_path = tempfile.mkstemp()[1]
- with mock.patch('glance_plugin.image.ctx',
- ctx_mock({'image': {'data': image_file_path}})):
- glance_plugin.image._validate_image()
-
- @mock.patch('glance_plugin.image.ctx',
- ctx_mock({'image': {'data': '/test/path'}}))
- # test when open file throws IO error
- def test_check_image_bad_file(self):
- open_name = '%s.open' % __name__
- with mock.patch(open_name, create=True) as mock_open:
- mock_open.side_effect = [mock_open(read_data='Data').return_value]
- self.assertRaises(NonRecoverableError,
- glance_plugin.image._validate_image)
-
- @mock.patch('glance_plugin.image.ctx',
- ctx_mock({'image_url': '?', 'image': {}}))
- # test when bad url
- def test_check_image_bad_url(self):
- http_connection_mock = mock.MagicMock()
- http_connection_mock.return_value.getresponse.return_value.status = 400
- with mock.patch('httplib.HTTPConnection', http_connection_mock):
- self.assertRaises(NonRecoverableError,
- glance_plugin.image._validate_image)
-
-
-class TestValidateProperties(unittest.TestCase):
-
- @mock.patch('glance_plugin.image.ctx',
- ctx_mock({'image': {'container_format': 'bare'}}))
- def test_check_image_container_format_no_disk_format(self):
- # Test if it throws exception when container_format is set but disk_format is missing
- self.assertRaises(NonRecoverableError,
- image._validate_image_dictionary)
-
- @mock.patch('glance_plugin.image.ctx',
- ctx_mock({'image': {'disk_format': 'qcow2'}}))
- def test_check_image_no_container_format_disk_format(self):
- # Test if it throws exception when disk_format is set but container_format is missing
- self.assertRaises(NonRecoverableError,
- image._validate_image_dictionary)
-
- @mock.patch('glance_plugin.image.ctx',
- ctx_mock({'image': {}}))
- def test_check_image_no_container_format_no_disk_format(self):
- # Test if it throws exception no container_format & no disk_format
- self.assertRaises(NonRecoverableError,
- image._validate_image_dictionary)
-
- @mock.patch('glance_plugin.image.ctx',
- ctx_mock(
- {'image':
- {'container_format': 'bare',
- 'disk_format': 'qcow2'}}))
- def test_check_image_container_format_disk_format(self):
- # Test that it does not throw an exception when both container_format and disk_format are set
- image._validate_image_dictionary()
-
-
-class TestStartImage(unittest.TestCase):
- blueprint_path = os.path.join('resources',
- 'test-image-start.yaml')
-
- @mock.patch('glance_plugin.image.create')
- @workflow_test(blueprint_path, copy_plugin_yaml=True)
- def test_image_lifecycle_start(self, cfy_local, *_):
- test_vars = {
- 'counter': 0,
- 'image': mock.MagicMock()
- }
-
- def _mock_get_image_by_ctx(*_):
- i = test_vars['image']
- if test_vars['counter'] == 0:
- i.status = 'different image status'
- else:
- i.status = glance_plugin.image.IMAGE_STATUS_ACTIVE
- test_vars['counter'] += 1
- return i
-
- with mock.patch('openstack_plugin_common.GlanceClient'):
- with mock.patch('glance_plugin.image._get_image_by_ctx',
- side_effect=_mock_get_image_by_ctx):
- cfy_local.execute('install', task_retries=3)
-
- self.assertEqual(2, test_vars['counter'])
- self.assertEqual(0, test_vars['image'].start.call_count)
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/__init__.py
deleted file mode 100644
index 809f033a55..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#########
-# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/project.py b/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/project.py
deleted file mode 100644
index 223ffbbb5c..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/project.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#########
-# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from cloudify.exceptions import NonRecoverableError
-
-from openstack_plugin_common import (with_keystone_client,
- with_nova_client,
- with_cinder_client,
- with_neutron_client,
- get_resource_id,
- use_external_resource,
- delete_resource_and_runtime_properties,
- validate_resource,
- COMMON_RUNTIME_PROPERTIES_KEYS,
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY)
-
-
-PROJECT_OPENSTACK_TYPE = 'project'
-
-TENANT_QUOTA_TYPE = 'quota'
-
-RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
-
-
-@operation
-@with_keystone_client
-def create(keystone_client, **kwargs):
- if use_external_resource(ctx, keystone_client, PROJECT_OPENSTACK_TYPE):
- return
-
- project_dict = {
- 'name': get_resource_id(ctx, PROJECT_OPENSTACK_TYPE),
- 'domain': 'default'
- }
-
- project_dict.update(ctx.node.properties['project'])
- project = keystone_client.projects.create(**project_dict)
-
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = project.id
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
- PROJECT_OPENSTACK_TYPE
- ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = project.name
-
-
-@operation
-@with_keystone_client
-@with_nova_client
-@with_cinder_client
-@with_neutron_client
-def start(keystone_client, nova_client, cinder_client, neutron_client,
- **kwargs):
- project_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- users = ctx.node.properties['users']
- validate_users(users, keystone_client)
-
- assign_users(project_id, users, keystone_client)
-
- quota = ctx.node.properties[TENANT_QUOTA_TYPE]
- update_quota(project_id, quota, nova_client, 'nova')
- update_quota(project_id, quota, neutron_client, 'neutron')
- update_quota(project_id, quota, cinder_client, 'cinder')
-
-
-@operation
-@with_keystone_client
-@with_nova_client
-@with_cinder_client
-@with_neutron_client
-def delete(keystone_client, nova_client, cinder_client,
- neutron_client, **kwargs):
- tenant_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- quota = ctx.node.properties[TENANT_QUOTA_TYPE]
- delete_quota(tenant_id, quota, nova_client, 'nova')
- delete_quota(tenant_id, quota, neutron_client, 'neutron')
- delete_quota(tenant_id, quota, cinder_client, 'cinder')
- delete_resource_and_runtime_properties(ctx, keystone_client,
- RUNTIME_PROPERTIES_KEYS)
-
-
-@operation
-@with_keystone_client
-def creation_validation(keystone_client, **kwargs):
- validate_resource(ctx, keystone_client, PROJECT_OPENSTACK_TYPE)
-
-
-def assign_users(project_id, users, keystone_client):
- for user in users:
- roles = user['roles']
- u = keystone_client.users.find(name=user['name'])
- for role in roles:
- r = keystone_client.roles.find(name=role)
- keystone_client.roles.grant(user=u.id,
- project=project_id,
- role=r.id)
-
-
-def validate_users(users, keystone_client):
- user_names = [user['name'] for user in users]
- if len(user_names) > len(set(user_names)):
- raise NonRecoverableError('Users are not unique')
-
- for user_name in user_names:
- keystone_client.users.find(name=user_name)
-
- for user in users:
- if len(user['roles']) > len(set(user['roles'])):
- msg = 'Roles for user {} are not unique'
- raise NonRecoverableError(msg.format(user['name']))
-
- role_names = {role for user in users for role in user['roles']}
- for role_name in role_names:
- keystone_client.roles.find(name=role_name)
-
-
-def update_quota(tenant_id, quota, client, what_quota):
- updated_quota = quota.get(what_quota)
- if updated_quota:
- if what_quota == 'neutron':
- new_quota = client.update_quota(tenant_id=tenant_id,
- body={'quota': updated_quota})
- else:
- new_quota = client.quotas.update(tenant_id=tenant_id,
- **updated_quota)
- ctx.logger.info(
- 'Updated {0} quota: {1}'.format(what_quota, str(new_quota)))
-
-
-def delete_quota(project_id, quota, client, what_quota):
- deleting_quota = quota.get(what_quota)
- if deleting_quota:
- if what_quota == 'neutron':
- client.delete_quota(tenant_id=project_id)
- else:
- client.quotas.delete(tenant_id=project_id)
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/tests/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/tests/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/tests/__init__.py
+++ /dev/null
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/tests/test.py b/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/tests/test.py
deleted file mode 100644
index de6567ba3a..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/keystone_plugin/tests/test.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import mock
-import unittest
-
-from cloudify.context import NODE_INSTANCE
-
-from cloudify.mocks import (
- MockContext,
- MockNodeInstanceContext,
- MockNodeContext
-)
-from openstack_plugin_common import (
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_NAME_PROPERTY,
- OPENSTACK_TYPE_PROPERTY
- )
-from keystone_plugin.project import PROJECT_OPENSTACK_TYPE
-import keystone_plugin
-
-
-class TestProject(unittest.TestCase):
-
- test_id = 'test-id'
- test_name = 'test-name'
- test_deployment_id = 'test-deployment-id'
- test_user = 'test-user'
- test_role = 'test-role'
-
- class MockProjectOS:
- def __init__(self, id, name):
- self._id = id
- self._name = name
- self._users = {}
-
- @property
- def id(self):
- return self._id
-
- @property
- def name(self):
- return self._name
-
- def find(self, *_, **__):
- return mock.MagicMock(id='test-role')
-
- def grant(self, role, user, *_, **__):
- self._users[user] = role
-
- def mock_keystone_client(self, mock_project):
- keystone_client = mock.MagicMock()
- keystone_client.projects.create.return_value = mock_project
- keystone_client.users.find.return_value = mock.MagicMock(
- id=self.test_user)
- keystone_client.roles = mock_project
- return keystone_client
-
- def mock_ctx(self, test_vars, test_id,
- test_deployment_id, runtime_properties=None):
- ctx = MockContext()
- ctx.node = MockNodeContext(properties=test_vars)
- ctx.instance = MockNodeInstanceContext(
- id=test_id, runtime_properties=runtime_properties or {})
- ctx.deployment = mock.Mock()
- ctx.deployment.id = test_deployment_id
- ctx.type = NODE_INSTANCE
- return ctx
-
- @mock.patch('openstack_plugin_common._put_client_in_kw',
- autospec=True, return_value=None)
- def test_keystone_project_create(self, *_):
- test_vars = {
- 'project': {},
- 'resource_id': '',
- 'quota': {},
- 'users': {}
- }
-
- ctx = self.mock_ctx(test_vars, self.test_id, self.test_deployment_id)
- keystone_plugin.project.ctx = ctx
- keystone_plugin.project.create(
- self.mock_keystone_client(self.MockProjectOS(self.test_id,
- self.test_name)))
- self.assertEqual(self.test_name,
- ctx.instance.runtime_properties[
- OPENSTACK_NAME_PROPERTY])
- self.assertEqual(self.test_id,
- ctx.instance.runtime_properties[
- OPENSTACK_ID_PROPERTY])
- self.assertEqual(PROJECT_OPENSTACK_TYPE,
- ctx.instance.runtime_properties[
- OPENSTACK_TYPE_PROPERTY])
-
- @mock.patch('openstack_plugin_common._put_client_in_kw',
- autospec=True, return_value=None)
- def test_assign_user(self, *_):
- test_vars = {
- 'project': {},
- 'resource_id': '',
- 'quota': {},
- 'users': [{'name': self.test_user,
- 'roles': [self.test_role]}]
- }
- ctx = self.mock_ctx(test_vars,
- self.test_id,
- self.test_deployment_id,
- {OPENSTACK_ID_PROPERTY: self.test_id})
- mock_project = self.MockProjectOS(self.test_id, self.test_name)
- keystone_client = self.mock_keystone_client(mock_project)
- keystone_plugin.project.ctx = ctx
- keystone_plugin.project.start(
- keystone_client,
- mock.MagicMock(), # nova_client
- mock.MagicMock(), # cinder_client
- mock.MagicMock()) # neutron_client
- self.assertEqual({self.test_user: self.test_role},
- mock_project._users)
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/__init__.py
deleted file mode 100644
index 04cb21f745..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__author__ = 'idanmo'
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/floatingip.py b/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/floatingip.py
deleted file mode 100644
index 1a9d0449ca..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/floatingip.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from cloudify.exceptions import NonRecoverableError
-from openstack_plugin_common import (
- with_neutron_client,
- provider,
- is_external_relationship,
- is_external_relationship_not_conditionally_created,
- OPENSTACK_ID_PROPERTY
-)
-from openstack_plugin_common.floatingip import (
- use_external_floatingip,
- set_floatingip_runtime_properties,
- delete_floatingip,
- floatingip_creation_validation
-)
-
-
-@operation
-@with_neutron_client
-def create(neutron_client, args, **kwargs):
-
- if use_external_floatingip(neutron_client, 'floating_ip_address',
- lambda ext_fip: ext_fip['floating_ip_address']):
- return
-
- floatingip = {
- # No defaults
- }
- floatingip.update(ctx.node.properties['floatingip'], **args)
-
- # Sugar: floating_network_name -> (resolve) -> floating_network_id
- if 'floating_network_name' in floatingip:
- floatingip['floating_network_id'] = neutron_client.cosmo_get_named(
- 'network', floatingip['floating_network_name'])['id']
- del floatingip['floating_network_name']
- elif 'floating_network_id' not in floatingip:
- provider_context = provider(ctx)
- ext_network = provider_context.ext_network
- if ext_network:
- floatingip['floating_network_id'] = ext_network['id']
- else:
- raise NonRecoverableError(
- 'Missing floating network id, name or external network')
-
- fip = neutron_client.create_floatingip(
- {'floatingip': floatingip})['floatingip']
- set_floatingip_runtime_properties(fip['id'], fip['floating_ip_address'])
-
- ctx.logger.info('Floating IP creation response: {0}'.format(fip))
-
-
-@operation
-@with_neutron_client
-def delete(neutron_client, **kwargs):
- delete_floatingip(neutron_client)
-
-
-@operation
-@with_neutron_client
-def creation_validation(neutron_client, **kwargs):
- floatingip_creation_validation(neutron_client, 'floating_ip_address')
-
-
-@operation
-@with_neutron_client
-def connect_port(neutron_client, **kwargs):
- if is_external_relationship_not_conditionally_created(ctx):
- return
-
- port_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- floating_ip_id = ctx.target.instance.runtime_properties[
- OPENSTACK_ID_PROPERTY]
- fip = {'port_id': port_id}
- neutron_client.update_floatingip(floating_ip_id, {'floatingip': fip})
-
-
-@operation
-@with_neutron_client
-def disconnect_port(neutron_client, **kwargs):
- if is_external_relationship(ctx):
- ctx.logger.info('Not disassociating floatingip and port since '
- 'external floatingip and port are being used')
- return
-
- floating_ip_id = ctx.target.instance.runtime_properties[
- OPENSTACK_ID_PROPERTY]
- fip = {'port_id': None}
- neutron_client.update_floatingip(floating_ip_id, {'floatingip': fip})
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/network.py b/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/network.py
deleted file mode 100644
index eadcc3b4e8..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/network.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from cloudify.exceptions import NonRecoverableError
-from openstack_plugin_common import (
- transform_resource_name,
- with_neutron_client,
- get_resource_id,
- is_external_resource,
- is_external_resource_not_conditionally_created,
- delete_resource_and_runtime_properties,
- use_external_resource,
- validate_resource,
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY,
- COMMON_RUNTIME_PROPERTIES_KEYS
-)
-
-NETWORK_OPENSTACK_TYPE = 'network'
-
-# Runtime properties
-RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
-
-
-@operation
-@with_neutron_client
-def create(neutron_client, args, **kwargs):
-
- if use_external_resource(ctx, neutron_client, NETWORK_OPENSTACK_TYPE):
- return
-
- network = {
- 'admin_state_up': True,
- 'name': get_resource_id(ctx, NETWORK_OPENSTACK_TYPE),
- }
- network.update(ctx.node.properties['network'], **args)
- transform_resource_name(ctx, network)
-
- net = neutron_client.create_network({'network': network})['network']
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = net['id']
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] =\
- NETWORK_OPENSTACK_TYPE
- ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = net['name']
-
-
-@operation
-@with_neutron_client
-def start(neutron_client, **kwargs):
- network_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
-
- if is_external_resource_not_conditionally_created(ctx):
- ctx.logger.info('Validating external network is started')
- if not neutron_client.show_network(
- network_id)['network']['admin_state_up']:
- raise NonRecoverableError(
- 'Expected external resource network {0} to be in '
- '"admin_state_up"=True'.format(network_id))
- return
-
- neutron_client.update_network(
- network_id, {
- 'network': {
- 'admin_state_up': True
- }
- })
-
-
-@operation
-@with_neutron_client
-def stop(neutron_client, **kwargs):
- if is_external_resource(ctx):
- ctx.logger.info('Not stopping network since an external network is '
- 'being used')
- return
-
- neutron_client.update_network(
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY], {
- 'network': {
- 'admin_state_up': False
- }
- })
-
-
-@operation
-@with_neutron_client
-def delete(neutron_client, **kwargs):
- delete_resource_and_runtime_properties(ctx, neutron_client,
- RUNTIME_PROPERTIES_KEYS)
-
-
-@operation
-@with_neutron_client
-def creation_validation(neutron_client, **kwargs):
- validate_resource(ctx, neutron_client, NETWORK_OPENSTACK_TYPE)
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/port.py b/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/port.py
deleted file mode 100644
index 4db4c442c5..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/port.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from cloudify.exceptions import NonRecoverableError
-
-import neutronclient.common.exceptions as neutron_exceptions
-
-from openstack_plugin_common import (
- transform_resource_name,
- with_neutron_client,
- with_nova_client,
- get_resource_id,
- get_openstack_id_of_single_connected_node_by_openstack_type,
- delete_resource_and_runtime_properties,
- delete_runtime_properties,
- use_external_resource,
- is_external_relationship,
- validate_resource,
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY,
- COMMON_RUNTIME_PROPERTIES_KEYS,
- is_external_relationship_not_conditionally_created)
-
-from neutron_plugin.network import NETWORK_OPENSTACK_TYPE
-from neutron_plugin.subnet import SUBNET_OPENSTACK_TYPE
-from openstack_plugin_common.floatingip import get_server_floating_ip
-
-PORT_OPENSTACK_TYPE = 'port'
-
-# Runtime properties
-FIXED_IP_ADDRESS_PROPERTY = 'fixed_ip_address' # the fixed ip address
-MAC_ADDRESS_PROPERTY = 'mac_address' # the mac address
-RUNTIME_PROPERTIES_KEYS = \
- COMMON_RUNTIME_PROPERTIES_KEYS + [FIXED_IP_ADDRESS_PROPERTY,
- MAC_ADDRESS_PROPERTY]
-
-NO_SG_PORT_CONNECTION_RETRY_INTERVAL = 3
-
-
-@operation
-@with_neutron_client
-def create(neutron_client, args, **kwargs):
-
- ext_port = use_external_resource(ctx, neutron_client, PORT_OPENSTACK_TYPE)
- if ext_port:
- try:
- net_id = \
- get_openstack_id_of_single_connected_node_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE, True)
-
- if net_id:
- port_id = ctx.instance.runtime_properties[
- OPENSTACK_ID_PROPERTY]
-
- if neutron_client.show_port(
- port_id)['port']['network_id'] != net_id:
- raise NonRecoverableError(
- 'Expected external resources port {0} and network {1} '
- 'to be connected'.format(port_id, net_id))
-
- ctx.instance.runtime_properties[FIXED_IP_ADDRESS_PROPERTY] = \
- _get_fixed_ip(ext_port)
- ctx.instance.runtime_properties[MAC_ADDRESS_PROPERTY] = \
- ext_port['mac_address']
- return
- except Exception:
- delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS)
- raise
-
- net_id = get_openstack_id_of_single_connected_node_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE)
-
- port = {
- 'name': get_resource_id(ctx, PORT_OPENSTACK_TYPE),
- 'network_id': net_id,
- 'security_groups': [],
- }
-
- _handle_fixed_ips(port)
- port.update(ctx.node.properties['port'], **args)
- transform_resource_name(ctx, port)
-
- p = neutron_client.create_port({'port': port})['port']
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = p['id']
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] =\
- PORT_OPENSTACK_TYPE
- ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = p['name']
- ctx.instance.runtime_properties[FIXED_IP_ADDRESS_PROPERTY] = \
- _get_fixed_ip(p)
- ctx.instance.runtime_properties[MAC_ADDRESS_PROPERTY] = p['mac_address']
-
-
-@operation
-@with_neutron_client
-def delete(neutron_client, **kwargs):
- try:
- delete_resource_and_runtime_properties(ctx, neutron_client,
- RUNTIME_PROPERTIES_KEYS)
- except neutron_exceptions.NeutronClientException, e:
- if e.status_code == 404:
- # port was probably deleted when an attached device was deleted
- delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS)
- else:
- raise
-
-
-@operation
-@with_nova_client
-@with_neutron_client
-def detach(nova_client, neutron_client, **kwargs):
-
- if is_external_relationship(ctx):
- ctx.logger.info('Not detaching port from server since '
- 'external port and server are being used')
- return
-
- port_id = ctx.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
-
- server_floating_ip = get_server_floating_ip(neutron_client, server_id)
- if server_floating_ip:
- ctx.logger.info('We have floating ip {0} attached to server'
- .format(server_floating_ip['floating_ip_address']))
- server = nova_client.servers.get(server_id)
- server.remove_floating_ip(server_floating_ip['floating_ip_address'])
- return ctx.operation.retry(
- message='Waiting for the floating ip {0} to '
- 'detach from server {1}..'
- .format(server_floating_ip['floating_ip_address'],
- server_id),
- retry_after=10)
- change = {
- 'port': {
- 'device_id': '',
- 'device_owner': ''
- }
- }
- ctx.logger.info('Detaching port {0}...'.format(port_id))
- neutron_client.update_port(port_id, change)
- ctx.logger.info('Successfully detached port {0}'.format(port_id))
-
-
-@operation
-@with_neutron_client
-def connect_security_group(neutron_client, **kwargs):
- port_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- security_group_id = ctx.target.instance.runtime_properties[
- OPENSTACK_ID_PROPERTY]
-
- if is_external_relationship_not_conditionally_created(ctx):
- ctx.logger.info('Validating external port and security-group are '
- 'connected')
- if any(sg for sg in neutron_client.show_port(port_id)['port'].get(
- 'security_groups', []) if sg == security_group_id):
- return
- raise NonRecoverableError(
- 'Expected external resources port {0} and security-group {1} to '
- 'be connected'.format(port_id, security_group_id))
-
- # WARNING: non-atomic operation
- port = neutron_client.cosmo_get('port', id=port_id)
- ctx.logger.info(
- "connect_security_group(): source_id={0} target={1}".format(
- port_id, ctx.target.instance.runtime_properties))
- sgs = port['security_groups'] + [security_group_id]
- neutron_client.update_port(port_id, {'port': {'security_groups': sgs}})
-
- # Double check if SG has been actually updated (a race-condition
- # in OpenStack):
- port_info = neutron_client.show_port(port_id)['port']
- port_security_groups = port_info.get('security_groups', [])
- if security_group_id not in port_security_groups:
- return ctx.operation.retry(
- message='Security group connection (`{0}\' -> `{1}\')'
- ' has not been established!'.format(port_id,
- security_group_id),
- retry_after=NO_SG_PORT_CONNECTION_RETRY_INTERVAL
- )
-
-
-@operation
-@with_neutron_client
-def creation_validation(neutron_client, **kwargs):
- validate_resource(ctx, neutron_client, PORT_OPENSTACK_TYPE)
-
-
-def _get_fixed_ip(port):
- # a port may have no fixed IP if it's set on a network without subnets
- return port['fixed_ips'][0]['ip_address'] if port['fixed_ips'] else None
-
-
-def _handle_fixed_ips(port):
- fixed_ips_element = {}
-
- # checking for fixed ip property
- if ctx.node.properties['fixed_ip']:
- fixed_ips_element['ip_address'] = ctx.node.properties['fixed_ip']
-
- # checking for a connected subnet
- subnet_id = get_openstack_id_of_single_connected_node_by_openstack_type(
- ctx, SUBNET_OPENSTACK_TYPE, if_exists=True)
- if subnet_id:
- fixed_ips_element['subnet_id'] = subnet_id
-
- # applying fixed ip parameter, if available
- if fixed_ips_element:
- port['fixed_ips'] = [fixed_ips_element]
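
For reference, a minimal standalone sketch of the fixed_ips element that _handle_fixed_ips assembles before the port is created. It is not part of the deleted module; the property value and subnet id below are illustrative placeholders.

# Sketch of the payload shape built by _handle_fixed_ips (values are hypothetical).
port = {'name': 'example-port', 'network_id': 'net-1234'}

fixed_ip = '10.0.0.5'        # would come from ctx.node.properties['fixed_ip']
subnet_id = 'subnet-5678'    # would come from a connected subnet node, if any

fixed_ips_element = {}
if fixed_ip:
    fixed_ips_element['ip_address'] = fixed_ip
if subnet_id:
    fixed_ips_element['subnet_id'] = subnet_id
if fixed_ips_element:
    port['fixed_ips'] = [fixed_ips_element]

# port == {'name': 'example-port', 'network_id': 'net-1234',
#          'fixed_ips': [{'ip_address': '10.0.0.5', 'subnet_id': 'subnet-5678'}]}
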
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/router.py b/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/router.py
deleted file mode 100644
index 1a2851e4bc..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/router.py
+++ /dev/null
@@ -1,215 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import warnings
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from cloudify.exceptions import NonRecoverableError
-
-from openstack_plugin_common import (
- provider,
- transform_resource_name,
- get_resource_id,
- with_neutron_client,
- use_external_resource,
- is_external_relationship,
- is_external_relationship_not_conditionally_created,
- delete_runtime_properties,
- get_openstack_ids_of_connected_nodes_by_openstack_type,
- delete_resource_and_runtime_properties,
- get_resource_by_name_or_id,
- validate_resource,
- COMMON_RUNTIME_PROPERTIES_KEYS,
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY
-)
-
-from neutron_plugin.network import NETWORK_OPENSTACK_TYPE
-
-
-ROUTER_OPENSTACK_TYPE = 'router'
-
-# Runtime properties
-RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
-
-
-@operation
-@with_neutron_client
-def create(neutron_client, args, **kwargs):
-
- if use_external_resource(ctx, neutron_client, ROUTER_OPENSTACK_TYPE):
- try:
- ext_net_id_by_rel = _get_connected_ext_net_id(neutron_client)
-
- if ext_net_id_by_rel:
- router_id = \
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
-
- router = neutron_client.show_router(router_id)['router']
- if not (router['external_gateway_info'] and 'network_id' in
- router['external_gateway_info'] and
- router['external_gateway_info']['network_id'] ==
- ext_net_id_by_rel):
- raise NonRecoverableError(
- 'Expected external resources router {0} and '
- 'external network {1} to be connected'.format(
- router_id, ext_net_id_by_rel))
- return
- except Exception:
- delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS)
- raise
-
- router = {
- 'name': get_resource_id(ctx, ROUTER_OPENSTACK_TYPE),
- }
- router.update(ctx.node.properties['router'], **args)
- transform_resource_name(ctx, router)
-
- _handle_external_network_config(router, neutron_client)
-
- r = neutron_client.create_router({'router': router})['router']
-
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = r['id']
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] =\
- ROUTER_OPENSTACK_TYPE
- ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = r['name']
-
-
-@operation
-@with_neutron_client
-def connect_subnet(neutron_client, **kwargs):
- router_id = ctx.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- subnet_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
-
- if is_external_relationship_not_conditionally_created(ctx):
- ctx.logger.info('Validating external subnet and router '
- 'are associated')
- for port in neutron_client.list_ports(device_id=router_id)['ports']:
- for fixed_ip in port.get('fixed_ips', []):
- if fixed_ip.get('subnet_id') == subnet_id:
- return
- raise NonRecoverableError(
- 'Expected external resources router {0} and subnet {1} to be '
- 'connected'.format(router_id, subnet_id))
-
- neutron_client.add_interface_router(router_id, {'subnet_id': subnet_id})
-
-
-@operation
-@with_neutron_client
-def disconnect_subnet(neutron_client, **kwargs):
- if is_external_relationship(ctx):
- ctx.logger.info('Not disconnecting subnet from router since external '
- 'subnet and router are being used')
- return
-
- neutron_client.remove_interface_router(
- ctx.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY], {
- 'subnet_id': ctx.source.instance.runtime_properties[
- OPENSTACK_ID_PROPERTY]
- }
- )
-
-
-@operation
-@with_neutron_client
-def delete(neutron_client, **kwargs):
- delete_resource_and_runtime_properties(ctx, neutron_client,
- RUNTIME_PROPERTIES_KEYS)
-
-
-@operation
-@with_neutron_client
-def creation_validation(neutron_client, **kwargs):
- validate_resource(ctx, neutron_client, ROUTER_OPENSTACK_TYPE)
-
-
-def _insert_ext_net_id_to_router_config(ext_net_id, router):
- router['external_gateway_info'] = router.get(
- 'external_gateway_info', {})
- router['external_gateway_info']['network_id'] = ext_net_id
-
-
-def _handle_external_network_config(router, neutron_client):
- # attempting to find an external network for the router to connect to -
- # first by either a network name or id passed in explicitly; then by a
- # network connected by a relationship; with a final optional fallback to an
- # external network set in the Provider-context. Otherwise the router will
- # simply not get connected to an external network
-
- provider_context = provider(ctx)
-
- ext_net_id_by_rel = _get_connected_ext_net_id(neutron_client)
- ext_net_by_property = ctx.node.properties['external_network']
-
- # the following is meant for backwards compatibility with the
- # 'network_name' sugaring
- if 'external_gateway_info' in router and 'network_name' in \
- router['external_gateway_info']:
- warnings.warn(
- 'Passing external "network_name" inside the '
- 'external_gateway_info key of the "router" property is now '
- 'deprecated; Use the "external_network" property instead',
- DeprecationWarning)
-
- ext_net_by_property = router['external_gateway_info']['network_name']
- del (router['external_gateway_info']['network_name'])
-
- # need to check if the user explicitly passed network_id in the external
- # gateway configuration as it affects external network behavior by
- # relationship and/or provider context
- if 'external_gateway_info' in router and 'network_id' in \
- router['external_gateway_info']:
- ext_net_by_property = router['external_gateway_info']['network_id']
-
- if ext_net_by_property and ext_net_id_by_rel:
- raise RuntimeError(
- "Router can't have an external network connected by both a "
- 'relationship and by a network name/id')
-
- if ext_net_by_property:
- ext_net_id = get_resource_by_name_or_id(
- ext_net_by_property, NETWORK_OPENSTACK_TYPE, neutron_client)['id']
- _insert_ext_net_id_to_router_config(ext_net_id, router)
- elif ext_net_id_by_rel:
- _insert_ext_net_id_to_router_config(ext_net_id_by_rel, router)
- elif ctx.node.properties['default_to_managers_external_network'] and \
- provider_context.ext_network:
- _insert_ext_net_id_to_router_config(provider_context.ext_network['id'],
- router)
-
-
-def _check_if_network_is_external(neutron_client, network_id):
- return neutron_client.show_network(
- network_id)['network']['router:external']
-
-
-def _get_connected_ext_net_id(neutron_client):
- ext_net_ids = \
- [net_id
- for net_id in
- get_openstack_ids_of_connected_nodes_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE) if
- _check_if_network_is_external(neutron_client, net_id)]
-
- if len(ext_net_ids) > 1:
- raise NonRecoverableError(
- 'More than one external network is connected to the router by a '
- 'relationship; external network IDs: {0}'.format(
- ext_net_ids))
-
- return ext_net_ids[0] if ext_net_ids else None
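
The gateway-selection precedence described in _handle_external_network_config can be summarised with a small standalone sketch (not part of the deleted source; the function name and ids are illustrative): an explicit network name/id wins, then a relationship-connected external network, then the provider-context default.

def pick_external_network(net_by_property, net_id_by_rel, provider_ext_net,
                          default_to_managers=True):
    # Mirrors the precedence applied above; conflicting inputs are rejected.
    if net_by_property and net_id_by_rel:
        raise RuntimeError('external network set both by name/id and by relationship')
    if net_by_property:
        return net_by_property          # would be resolved to an id by a name-or-id lookup
    if net_id_by_rel:
        return net_id_by_rel
    if default_to_managers and provider_ext_net:
        return provider_ext_net['id']
    return None                         # router is created without an external gateway

print(pick_external_network(None, 'ext-net-from-rel', {'id': 'provider-ext-net'}))
# -> 'ext-net-from-rel'
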
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/security_group.py b/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/security_group.py
deleted file mode 100644
index 5f335f482b..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/security_group.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from time import sleep
-
-from requests.exceptions import RequestException
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from cloudify.exceptions import NonRecoverableError
-from openstack_plugin_common import (
- transform_resource_name,
- with_neutron_client,
- delete_resource_and_runtime_properties,
-)
-from openstack_plugin_common.security_group import (
- build_sg_data,
- process_rules,
- use_external_sg,
- set_sg_runtime_properties,
- delete_sg,
- sg_creation_validation,
- RUNTIME_PROPERTIES_KEYS
-)
-
-DEFAULT_RULE_VALUES = {
- 'direction': 'ingress',
- 'ethertype': 'IPv4',
- 'port_range_min': 1,
- 'port_range_max': 65535,
- 'protocol': 'tcp',
- 'remote_group_id': None,
- 'remote_ip_prefix': '0.0.0.0/0',
-}
-
-
-@operation
-@with_neutron_client
-def create(
- neutron_client, args,
- status_attempts=10, status_timeout=2, **kwargs
-):
-
- security_group = build_sg_data(args)
- if not security_group['description']:
- security_group['description'] = ctx.node.properties['description']
-
- sg_rules = process_rules(neutron_client, DEFAULT_RULE_VALUES,
- 'remote_ip_prefix', 'remote_group_id',
- 'port_range_min', 'port_range_max')
-
- disable_default_egress_rules = ctx.node.properties.get(
- 'disable_default_egress_rules')
-
- if use_external_sg(neutron_client):
- return
-
- transform_resource_name(ctx, security_group)
-
- sg = neutron_client.create_security_group(
- {'security_group': security_group})['security_group']
-
- for attempt in range(max(status_attempts, 1)):
- sleep(status_timeout)
- try:
- neutron_client.show_security_group(sg['id'])
- except RequestException as e:
- ctx.logger.debug("Waiting for SG to be visible. Attempt {}".format(
- attempt))
- else:
- break
- else:
- raise NonRecoverableError(
- "Timed out waiting for security_group to exist", e)
-
- set_sg_runtime_properties(sg, neutron_client)
-
- try:
- if disable_default_egress_rules:
- for er in _egress_rules(_rules_for_sg_id(neutron_client,
- sg['id'])):
- neutron_client.delete_security_group_rule(er['id'])
-
- for sgr in sg_rules:
- sgr['security_group_id'] = sg['id']
- neutron_client.create_security_group_rule(
- {'security_group_rule': sgr})
- except Exception:
- try:
- delete_resource_and_runtime_properties(
- ctx, neutron_client,
- RUNTIME_PROPERTIES_KEYS)
- except Exception as e:
- raise NonRecoverableError(
- 'Exception while tearing down for retry', e)
- raise
-
-
-@operation
-@with_neutron_client
-def delete(neutron_client, **kwargs):
- delete_sg(neutron_client)
-
-
-@operation
-@with_neutron_client
-def creation_validation(neutron_client, **kwargs):
- sg_creation_validation(neutron_client, 'remote_ip_prefix')
-
-
-def _egress_rules(rules):
- return [rule for rule in rules if rule.get('direction') == 'egress']
-
-
-def _rules_for_sg_id(neutron_client, id):
- rules = neutron_client.list_security_group_rules()['security_group_rules']
- rules = [rule for rule in rules if rule['security_group_id'] == id]
- return rules
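
As an illustration of how DEFAULT_RULE_VALUES are meant to combine with a user-supplied rule, here is a standalone sketch of the security_group_rule payload that ends up being sent to Neutron. The rule values and group id are hypothetical, and the merge shown is a simplification of what process_rules performs.

defaults = {
    'direction': 'ingress', 'ethertype': 'IPv4',
    'port_range_min': 1, 'port_range_max': 65535,
    'protocol': 'tcp', 'remote_group_id': None,
    'remote_ip_prefix': '0.0.0.0/0',
}
user_rule = {'port_range_min': 80, 'port_range_max': 80}   # hypothetical node property

rule = dict(defaults)
rule.update(user_rule)
rule['security_group_id'] = 'sg-1234'   # filled in only after the group is created
# neutron_client.create_security_group_rule({'security_group_rule': rule})
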
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/subnet.py b/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/subnet.py
deleted file mode 100644
index 6e97c96755..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/subnet.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from cloudify.exceptions import NonRecoverableError
-from openstack_plugin_common import (
- with_neutron_client,
- transform_resource_name,
- get_resource_id,
- get_openstack_id_of_single_connected_node_by_openstack_type,
- delete_resource_and_runtime_properties,
- delete_runtime_properties,
- use_external_resource,
- validate_resource,
- validate_ip_or_range_syntax,
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY,
- COMMON_RUNTIME_PROPERTIES_KEYS
-)
-
-from neutron_plugin.network import NETWORK_OPENSTACK_TYPE
-
-SUBNET_OPENSTACK_TYPE = 'subnet'
-
-# Runtime properties
-RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
-
-
-@operation
-@with_neutron_client
-def create(neutron_client, args, **kwargs):
-
- if use_external_resource(ctx, neutron_client, SUBNET_OPENSTACK_TYPE):
- try:
- net_id = \
- get_openstack_id_of_single_connected_node_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE, True)
-
- if net_id:
- subnet_id = \
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
-
- if neutron_client.show_subnet(
- subnet_id)['subnet']['network_id'] != net_id:
- raise NonRecoverableError(
- 'Expected external resources subnet {0} and network'
- ' {1} to be connected'.format(subnet_id, net_id))
- return
- except Exception:
- delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS)
- raise
-
- net_id = get_openstack_id_of_single_connected_node_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE)
- subnet = {
- 'name': get_resource_id(ctx, SUBNET_OPENSTACK_TYPE),
- 'network_id': net_id,
- }
- subnet.update(ctx.node.properties['subnet'], **args)
- transform_resource_name(ctx, subnet)
-
- s = neutron_client.create_subnet({'subnet': subnet})['subnet']
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = s['id']
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
- SUBNET_OPENSTACK_TYPE
- ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = subnet['name']
-
-
-@operation
-@with_neutron_client
-def delete(neutron_client, **kwargs):
- delete_resource_and_runtime_properties(ctx, neutron_client,
- RUNTIME_PROPERTIES_KEYS)
-
-
-@operation
-@with_neutron_client
-def creation_validation(neutron_client, args, **kwargs):
- validate_resource(ctx, neutron_client, SUBNET_OPENSTACK_TYPE)
- subnet = dict(ctx.node.properties['subnet'], **args)
-
- if 'cidr' not in subnet:
- err = '"cidr" property must appear under the "subnet" property of a ' \
- 'subnet node'
- ctx.logger.error('VALIDATION ERROR: ' + err)
- raise NonRecoverableError(err)
- validate_ip_or_range_syntax(ctx, subnet['cidr'])
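
A minimal sketch of the subnet payload the create operation sends, together with the cidr check enforced by creation_validation. This is not part of the deleted source; the names and CIDR are placeholders.

subnet = {
    'name': 'example-subnet',
    'network_id': 'net-1234',
    'ip_version': 4,
    'cidr': '192.168.10.0/24',   # required; creation_validation rejects a subnet without it
}
if 'cidr' not in subnet:
    raise ValueError('"cidr" property must appear under the "subnet" property')
# neutron_client.create_subnet({'subnet': subnet})
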
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/__init__.py
deleted file mode 100644
index 04cb21f745..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__author__ = 'idanmo'
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test.py b/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test.py
deleted file mode 100644
index 459c23a6cd..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test.py
+++ /dev/null
@@ -1,220 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import mock
-import random
-import string
-import unittest
-
-from cloudify.exceptions import NonRecoverableError
-from cloudify.context import BootstrapContext
-
-from cloudify.mocks import MockCloudifyContext
-
-import openstack_plugin_common as common
-import openstack_plugin_common.tests.test as common_test
-
-import neutron_plugin
-import neutron_plugin.network
-import neutron_plugin.port
-import neutron_plugin.router
-import neutron_plugin.security_group
-
-
-class ResourcesRenamingTest(unittest.TestCase):
- def setUp(self):
- neutron_plugin.port._find_network_in_related_nodes = mock.Mock()
- # *** Configs from files ********************
- common.Config.get = mock.Mock()
- common.Config.get.return_value = {}
- # *** Neutron ********************
- self.neutron_mock = mock.Mock()
-
- def neutron_mock_connect(unused_self, unused_cfg):
- return self.neutron_mock
- common.NeutronClient.connect = neutron_mock_connect
-
- self.neutron_mock.cosmo_list = mock.Mock()
- self.neutron_mock.cosmo_list.return_value = []
-
- def _setup_ctx(self, obj_type):
- ctx = common_test.create_mock_ctx_with_provider_info(
- node_id='__cloudify_id_something_001',
- properties={
- obj_type: {
- 'name': obj_type + '_name',
- },
- 'rules': [] # For security_group
- }
- )
- return ctx
-
- def _test(self, obj_type):
- ctx = self._setup_ctx(obj_type)
- attr = getattr(self.neutron_mock, 'create_' + obj_type)
- attr.return_value = {
- obj_type: {
- 'id': obj_type + '_id',
- }
- }
- getattr(neutron_plugin, obj_type).create(ctx)
- calls = attr.mock_calls
- self.assertEquals(len(calls), 1) # Exactly one object created
- # Indexes into call[]:
- # 0 - the only call
- # 1 - regular arguments
- # 0 - first argument
- arg = calls[0][1][0]
- self.assertEquals(arg[obj_type]['name'], 'p2_' + obj_type + '_name')
-
- def test_network(self):
- self._test('network')
-
- def test_port(self):
- self._test('port')
-
- def test_router(self):
- self._test('router')
-
- def test_security_group(self):
- self._test('security_group')
-
- # Network chosen arbitrarily for this test.
- # Just testing something without prefix.
- def test_network_no_prefix(self):
- ctx = self._setup_ctx('network')
- for pctx in common_test.BOOTSTRAP_CONTEXTS_WITHOUT_PREFIX:
- ctx._bootstrap_context = BootstrapContext(pctx)
- self.neutron_mock.create_network.reset_mock()
- self.neutron_mock.create_network.return_value = {
- 'network': {
- 'id': 'network_id',
- }
- }
- neutron_plugin.network.create(ctx)
- calls = self.neutron_mock.create_network.mock_calls
- self.assertEquals(len(calls), 1) # Exactly one network created
- # Indexes into call[]:
- # 0 - the only call
- # 1 - regular arguments
- # 0 - first argument
- arg = calls[0][1][0]
- self.assertEquals(arg['network']['name'], 'network_name',
- "Failed with context: " + str(pctx))
-
-
-def _rand_str(n):
- chars = string.ascii_uppercase + string.digits
- return ''.join(random.choice(chars) for _ in range(n))
-
-
-class SecurityGroupTest(unittest.TestCase):
- def setUp(self):
- # *** Configs from files ********************
- common.Config.get = mock.Mock()
- common.Config.get.return_value = {}
- # *** Neutron ********************
- self.neutron_mock = mock.Mock()
-
- def neutron_mock_connect(unused_self, unused_cfg):
- return self.neutron_mock
- common.NeutronClient.connect = neutron_mock_connect
- neutron_plugin.security_group._rules_for_sg_id = mock.Mock()
- neutron_plugin.security_group._rules_for_sg_id.return_value = []
-
- def _setup_ctx(self):
- sg_name = _rand_str(6) + '_new'
- ctx = MockCloudifyContext(properties={
- 'security_group': {
- 'name': sg_name,
- 'description': 'blah'
- },
- 'rules': [{'port': 80}],
- 'disable_default_egress_rules': True,
- })
- return ctx
-
- def test_sg_new(self):
- ctx = self._setup_ctx()
- self.neutron_mock.cosmo_list = mock.Mock()
- self.neutron_mock.cosmo_list.return_value = []
- self.neutron_mock.create_security_group = mock.Mock()
- self.neutron_mock.create_security_group.return_value = {
- 'security_group': {
- 'description': 'blah',
- 'id': ctx['security_group']['name'] + '_id',
- }
- }
- neutron_plugin.security_group.create(ctx)
- self.assertTrue(self.neutron_mock.create_security_group.mock_calls)
-
- def test_sg_use_existing(self):
- ctx = self._setup_ctx()
- self.neutron_mock.cosmo_list = mock.Mock()
- self.neutron_mock.cosmo_list.return_value = [{
- 'id': ctx['security_group']['name'] + '_existing_id',
- 'description': 'blah',
- 'security_group_rules': [{
- 'remote_group_id': None,
- 'direction': 'ingress',
- 'protocol': 'tcp',
- 'ethertype': 'IPv4',
- 'port_range_max': 80,
- 'port_range_min': 80,
- 'remote_ip_prefix': '0.0.0.0/0',
- }]
- }]
- self.neutron_mock.create_security_group = mock.Mock()
- self.neutron_mock.create_security_group.return_value = {
- 'security_group': {
- 'description': 'blah',
- 'id': ctx['security_group']['name'] + '_id',
- }
- }
- neutron_plugin.security_group.create(ctx)
- self.assertFalse(self.neutron_mock.create_security_group.mock_calls)
-
- def test_sg_use_existing_with_other_rules(self):
- ctx = self._setup_ctx()
- self.neutron_mock.cosmo_list = mock.Mock()
- self.neutron_mock.cosmo_list.return_value = [{
- 'id': ctx['security_group']['name'] + '_existing_id',
- 'description': 'blah',
- 'security_group_rules': [{
- 'remote_group_id': None,
- 'direction': 'ingress',
- 'protocol': 'tcp',
- 'ethertype': 'IPv4',
- 'port_range_max': 81, # Note the different port!
- 'port_range_min': 81, # Note the different port!
- 'remote_ip_prefix': '0.0.0.0/0',
- }]
- }]
- self.neutron_mock.create_security_group = mock.Mock()
- self.neutron_mock.create_security_group.return_value = {
- 'security_group': {
- 'description': 'blah',
- 'id': ctx['security_group']['name'] + '_id',
- }
- }
- self.assertRaises(
- NonRecoverableError,
- neutron_plugin.security_group.create,
- ctx
- )
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test_port.py b/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test_port.py
deleted file mode 100644
index 1acee3d05d..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test_port.py
+++ /dev/null
@@ -1,156 +0,0 @@
-########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import unittest
-
-import mock
-
-import neutron_plugin.port
-from cloudify.mocks import (MockCloudifyContext,
- MockNodeInstanceContext,
- MockRelationshipSubjectContext)
-from openstack_plugin_common import (NeutronClientWithSugar,
- OPENSTACK_ID_PROPERTY)
-from cloudify.exceptions import OperationRetry
-
-
-class TestPort(unittest.TestCase):
-
- def test_fixed_ips_no_fixed_ips(self):
- node_props = {'fixed_ip': ''}
-
- with mock.patch(
- 'neutron_plugin.port.'
- 'get_openstack_id_of_single_connected_node_by_openstack_type',
- self._get_connected_subnet_mock(return_empty=True)):
- with mock.patch(
- 'neutron_plugin.port.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
-
- port = {}
- neutron_plugin.port._handle_fixed_ips(port)
-
- self.assertNotIn('fixed_ips', port)
-
- def test_fixed_ips_subnet_only(self):
- node_props = {'fixed_ip': ''}
-
- with mock.patch(
- 'neutron_plugin.port.'
- 'get_openstack_id_of_single_connected_node_by_openstack_type',
- self._get_connected_subnet_mock(return_empty=False)):
- with mock.patch(
- 'neutron_plugin.port.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
-
- port = {}
- neutron_plugin.port._handle_fixed_ips(port)
-
- self.assertEquals([{'subnet_id': 'some-subnet-id'}],
- port.get('fixed_ips'))
-
- def test_fixed_ips_ip_address_only(self):
- node_props = {'fixed_ip': '1.2.3.4'}
-
- with mock.patch(
- 'neutron_plugin.port.'
- 'get_openstack_id_of_single_connected_node_by_openstack_type',
- self._get_connected_subnet_mock(return_empty=True)):
- with mock.patch(
- 'neutron_plugin.port.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
-
- port = {}
- neutron_plugin.port._handle_fixed_ips(port)
-
- self.assertEquals([{'ip_address': '1.2.3.4'}],
- port.get('fixed_ips'))
-
- def test_fixed_ips_subnet_and_ip_address(self):
- node_props = {'fixed_ip': '1.2.3.4'}
-
- with mock.patch(
- 'neutron_plugin.port.'
- 'get_openstack_id_of_single_connected_node_by_openstack_type',
- self._get_connected_subnet_mock(return_empty=False)):
- with mock.patch(
- 'neutron_plugin.port.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
-
- port = {}
- neutron_plugin.port._handle_fixed_ips(port)
-
- self.assertEquals([{'ip_address': '1.2.3.4',
- 'subnet_id': 'some-subnet-id'}],
- port.get('fixed_ips'))
-
- @staticmethod
- def _get_connected_subnet_mock(return_empty=True):
- return lambda *args, **kw: None if return_empty else 'some-subnet-id'
-
- @staticmethod
- def _get_mock_ctx_with_node_properties(properties):
- return MockCloudifyContext(node_id='test_node_id',
- properties=properties)
-
-
-class MockNeutronClient(NeutronClientWithSugar):
- """A fake neutron client with hard-coded test data."""
- def __init__(self, update):
- self.update = update
- self.body = {'port': {'id': 'test-id', 'security_groups': []}}
-
- def show_port(self, *_):
- return self.body
-
- def update_port(self, _, b, **__):
- if self.update:
- self.body.update(b)
- return
-
- def cosmo_get(self, *_, **__):
- return self.body['port']
-
-
-class TestPortSG(unittest.TestCase):
- @mock.patch('openstack_plugin_common._put_client_in_kw')
- def test_connect_sg_to_port(self, *_):
- mock_neutron = MockNeutronClient(update=True)
- ctx = MockCloudifyContext(
- source=MockRelationshipSubjectContext(node=mock.MagicMock(),
- instance=mock.MagicMock()),
- target=MockRelationshipSubjectContext(node=mock.MagicMock(),
- instance=mock.MagicMock()))
-
- with mock.patch('neutron_plugin.port.ctx', ctx):
- neutron_plugin.port.connect_security_group(mock_neutron)
- self.assertIsNone(ctx.operation._operation_retry)
-
- @mock.patch('openstack_plugin_common._put_client_in_kw')
- def test_connect_sg_to_port_race_condition(self, *_):
- mock_neutron = MockNeutronClient(update=False)
-
- ctx = MockCloudifyContext(
- source=MockRelationshipSubjectContext(node=mock.MagicMock(),
- instance=mock.MagicMock()),
- target=MockRelationshipSubjectContext(
- node=mock.MagicMock(),
- instance=MockNodeInstanceContext(
- runtime_properties={
- OPENSTACK_ID_PROPERTY: 'test-sg-id'})))
- with mock.patch('neutron_plugin.port.ctx', ctx):
- neutron_plugin.port.connect_security_group(mock_neutron, ctx=ctx)
- self.assertIsInstance(ctx.operation._operation_retry,
- OperationRetry)
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test_security_group.py b/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test_security_group.py
deleted file mode 100644
index e958cddb33..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/neutron_plugin/tests/test_security_group.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# -*- coding: utf-8 -*-
-#########
-# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import unittest
-
-from mock import Mock, patch
-from requests.exceptions import RequestException
-
-from neutron_plugin import security_group
-
-from cloudify.exceptions import NonRecoverableError
-from cloudify.state import current_ctx
-
-from cloudify.mocks import MockCloudifyContext
-
-
-class FakeException(Exception):
- pass
-
-
-@patch('openstack_plugin_common.OpenStackClient._validate_auth_params')
-@patch('openstack_plugin_common.NeutronClientWithSugar')
-class TestSecurityGroup(unittest.TestCase):
-
- def setUp(self):
- super(TestSecurityGroup, self).setUp()
- self.nova_client = Mock()
-
- self.ctx = MockCloudifyContext(
- node_id='test',
- deployment_id='test',
- properties={
- 'description': 'The best Security Group. Great',
- 'rules': [],
- 'resource_id': 'mock_sg',
- 'security_group': {
- },
- 'server': {},
- 'openstack_config': {
- 'auth_url': 'things/v3',
- },
- },
- operation={'retry_number': 0},
- provider_context={'resources': {}}
- )
- current_ctx.set(self.ctx)
- self.addCleanup(current_ctx.clear)
-
- findctx = patch(
- 'openstack_plugin_common._find_context_in_kw',
- return_value=self.ctx,
- )
- findctx.start()
- self.addCleanup(findctx.stop)
-
- def test_set_sg_runtime_properties(self, mock_nc, *_):
- security_group.create(
- nova_client=self.nova_client,
- ctx=self.ctx,
- args={},
- )
-
- self.assertEqual(
- {
- 'external_type': 'security_group',
- 'external_id': mock_nc().get_id_from_resource(),
- 'external_name': mock_nc().get_name_from_resource(),
- },
- self.ctx.instance.runtime_properties
- )
-
- def test_create_sg_wait_timeout(self, mock_nc, *_):
- mock_nc().show_security_group.side_effect = RequestException
-
- with self.assertRaises(NonRecoverableError):
- security_group.create(
- nova_client=self.nova_client,
- ctx=self.ctx,
- args={},
- status_attempts=3,
- status_timeout=0.001,
- )
-
- @patch(
- 'neutron_plugin.security_group.delete_resource_and_runtime_properties')
- def test_dont_duplicate_if_failed_rule(self, mock_del_res, mock_nc, *_):
- self.ctx.node.properties['rules'] = [
- {
- 'port': '🍷',
- },
- ]
- mock_nc().create_security_group_rule.side_effect = FakeException
- mock_del_res.side_effect = FakeException('the 2nd')
-
- with self.assertRaises(NonRecoverableError) as e:
- security_group.create(
- nova_client=self.nova_client,
- ctx=self.ctx,
- args={},
- )
-
- self.assertIn('the 2nd', str(e.exception))
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/__init__.py
deleted file mode 100644
index bb533273be..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-__author__ = 'idanmo'
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/floatingip.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/floatingip.py
deleted file mode 100644
index e770c540a8..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/floatingip.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from openstack_plugin_common import with_nova_client
-from openstack_plugin_common.floatingip import (
- use_external_floatingip,
- set_floatingip_runtime_properties,
- delete_floatingip,
- floatingip_creation_validation
-)
-
-
-# random note regarding nova floating-ips: floating ips on nova-net have
-# pre-assigned ids, and thus a call "nova.floating_ips.get(<fip_id>)" will
-# return a value even if the floating-ip isn't even allocated.
-# currently all lookups in the code, including by id, use search (i.e.
-# nova.<type>.findall) and lists, which won't return such unallocated
-# resources.
-
-@operation
-@with_nova_client
-def create(nova_client, args, **kwargs):
-
- if use_external_floatingip(nova_client, 'ip',
- lambda ext_fip: ext_fip.ip):
- return
-
- floatingip = {
- 'pool': None
- }
- floatingip.update(ctx.node.properties['floatingip'], **args)
-
- fip = nova_client.floating_ips.create(floatingip['pool'])
- set_floatingip_runtime_properties(fip.id, fip.ip)
-
-
-@operation
-@with_nova_client
-def delete(nova_client, **kwargs):
- delete_floatingip(nova_client)
-
-
-@operation
-@with_nova_client
-def creation_validation(nova_client, **kwargs):
- floatingip_creation_validation(nova_client, 'ip')
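
The note above about nova-net floating IPs is the reason lookups go through list-style searches rather than get(). A short sketch of that pattern, assuming `nova` is an already-authenticated novaclient Client and that a pool named 'public' exists (both are assumptions, not part of the deleted source):

existing = [fip for fip in nova.floating_ips.list() if fip.ip == '203.0.113.10']
fip = existing[0] if existing else nova.floating_ips.create('public')
print(fip.id, fip.ip)
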
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/keypair.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/keypair.py
deleted file mode 100644
index 92281ab9e5..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/keypair.py
+++ /dev/null
@@ -1,202 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import os
-import errno
-from getpass import getuser
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from cloudify.exceptions import NonRecoverableError
-from openstack_plugin_common import (
- with_nova_client,
- validate_resource,
- use_external_resource,
- transform_resource_name,
- is_external_resource,
- is_external_resource_not_conditionally_created,
- delete_runtime_properties,
- get_resource_id,
- delete_resource_and_runtime_properties,
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY,
- COMMON_RUNTIME_PROPERTIES_KEYS
-)
-
-RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
-KEYPAIR_OPENSTACK_TYPE = 'keypair'
-
-PRIVATE_KEY_PATH_PROP = 'private_key_path'
-
-
-@operation
-@with_nova_client
-def create(nova_client, args, **kwargs):
-
- private_key_path = _get_private_key_path()
- pk_exists = _check_private_key_exists(private_key_path)
-
- if use_external_resource(ctx, nova_client, KEYPAIR_OPENSTACK_TYPE):
- if not pk_exists:
- delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS)
- raise NonRecoverableError(
- 'Failed to use external keypair (node {0}): the public key {1}'
- ' is available on Openstack, but the private key could not be '
- 'found at {2}'.format(ctx.node.id,
- ctx.node.properties['resource_id'],
- private_key_path))
- return
-
- if pk_exists:
- raise NonRecoverableError(
- "Can't create keypair - private key path already exists: {0}"
- .format(private_key_path))
-
- keypair = {
- 'name': get_resource_id(ctx, KEYPAIR_OPENSTACK_TYPE),
- }
- keypair.update(ctx.node.properties['keypair'], **args)
- transform_resource_name(ctx, keypair)
-
- keypair = nova_client.keypairs.create(keypair['name'],
- keypair.get('public_key'))
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = keypair.id
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
- KEYPAIR_OPENSTACK_TYPE
- ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = keypair.name
-
- try:
- # write private key file
- _mkdir_p(os.path.dirname(private_key_path))
- with open(private_key_path, 'w') as f:
- f.write(keypair.private_key)
- os.chmod(private_key_path, 0600)
- except Exception:
- _delete_private_key_file()
- delete_resource_and_runtime_properties(ctx, nova_client,
- RUNTIME_PROPERTIES_KEYS)
- raise
-
-
-@operation
-@with_nova_client
-def delete(nova_client, **kwargs):
- if not is_external_resource(ctx):
- ctx.logger.info('deleting keypair')
-
- _delete_private_key_file()
-
- nova_client.keypairs.delete(
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY])
- else:
- ctx.logger.info('not deleting keypair since an external keypair is '
- 'being used')
-
- delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS)
-
-
-@operation
-@with_nova_client
-def creation_validation(nova_client, **kwargs):
-
- def validate_private_key_permissions(private_key_path):
- ctx.logger.debug('checking whether private key file {0} has the '
- 'correct permissions'.format(private_key_path))
- if not os.access(private_key_path, os.R_OK):
- err = 'private key file {0} is not readable'\
- .format(private_key_path)
- ctx.logger.error('VALIDATION ERROR: ' + err)
- raise NonRecoverableError(err)
- ctx.logger.debug('OK: private key file {0} has the correct '
- 'permissions'.format(private_key_path))
-
- def validate_path_owner(path):
- ctx.logger.debug('checking whether directory {0} is owned by the '
- 'current user'.format(path))
- from pwd import getpwnam, getpwuid
-
- user = getuser()
- owner = getpwuid(os.stat(path).st_uid).pw_name
- current_user_id = str(getpwnam(user).pw_uid)
- owner_id = str(os.stat(path).st_uid)
-
- if not current_user_id == owner_id:
- err = '{0} is not owned by the current user (it is owned by {1})'\
- .format(path, owner)
- ctx.logger.warning('VALIDATION WARNING: {0}'.format(err))
- return
- ctx.logger.debug('OK: {0} is owned by the current user'.format(path))
-
- validate_resource(ctx, nova_client, KEYPAIR_OPENSTACK_TYPE)
-
- private_key_path = _get_private_key_path()
- pk_exists = _check_private_key_exists(private_key_path)
-
- if is_external_resource_not_conditionally_created(ctx):
- if pk_exists:
- if os.name == 'posix':
- validate_private_key_permissions(private_key_path)
- validate_path_owner(private_key_path)
- else:
- err = "can't use external keypair: the public key {0} is " \
- "available on Openstack, but the private key could not be " \
- "found at {1}".format(ctx.node.properties['resource_id'],
- private_key_path)
- ctx.logger.error('VALIDATION ERROR: {0}'.format(err))
- raise NonRecoverableError(err)
- else:
- if pk_exists:
- err = 'private key path already exists: {0}'.format(
- private_key_path)
- ctx.logger.error('VALIDATION ERROR: {0}'.format(err))
- raise NonRecoverableError(err)
- else:
- err = 'private key directory {0} is not writable'
- while private_key_path:
- if os.path.isdir(private_key_path):
- if not os.access(private_key_path, os.W_OK | os.X_OK):
- raise NonRecoverableError(err.format(private_key_path))
- else:
- break
- private_key_path, _ = os.path.split(private_key_path)
-
- ctx.logger.debug('OK: keypair configuration is valid')
-
-
-def _get_private_key_path():
- return os.path.expanduser(ctx.node.properties[PRIVATE_KEY_PATH_PROP])
-
-
-def _delete_private_key_file():
- private_key_path = _get_private_key_path()
- ctx.logger.debug('deleting private key file at {0}'.format(
- private_key_path))
- try:
- os.remove(private_key_path)
- except OSError as e:
- if e.errno == errno.ENOENT:
- # file was already deleted somehow
- return
- raise
-
-
-def _check_private_key_exists(private_key_path):
- return os.path.isfile(private_key_path)
-
-
-def _mkdir_p(path):
- if path and not os.path.isdir(path):
- os.makedirs(path)
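
The private-key persistence step in create() can be illustrated with a small standalone sketch: make sure the directory exists, write the key material, and restrict the file mode. The path and key material below are placeholders, not values used by the deleted module.

import os
import tempfile

private_key_path = os.path.join(tempfile.gettempdir(), 'example-key.pem')
key_material = '-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----\n'

key_dir = os.path.dirname(private_key_path)
if key_dir and not os.path.isdir(key_dir):
    os.makedirs(key_dir)
with open(private_key_path, 'w') as f:
    f.write(key_material)
os.chmod(private_key_path, 0o600)   # private keys should be readable by the owner only
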
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/security_group.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/security_group.py
deleted file mode 100644
index 283eae85cf..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/security_group.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from cloudify import ctx
-from cloudify.decorators import operation
-from openstack_plugin_common import (
- transform_resource_name,
- with_nova_client,
- delete_resource_and_runtime_properties
-)
-from openstack_plugin_common.security_group import (
- build_sg_data,
- process_rules,
- use_external_sg,
- set_sg_runtime_properties,
- delete_sg,
- sg_creation_validation,
- RUNTIME_PROPERTIES_KEYS
-)
-
-
-@operation
-@with_nova_client
-def create(nova_client, args, **kwargs):
-
- security_group = build_sg_data(args)
- security_group['description'] = ctx.node.properties['description']
-
- sgr_default_values = {
- 'ip_protocol': 'tcp',
- 'from_port': 1,
- 'to_port': 65535,
- 'cidr': '0.0.0.0/0',
- # 'group_id': None,
- # 'parent_group_id': None,
- }
- sg_rules = process_rules(nova_client, sgr_default_values,
- 'cidr', 'group_id', 'from_port', 'to_port')
-
- if use_external_sg(nova_client):
- return
-
- transform_resource_name(ctx, security_group)
-
- sg = nova_client.security_groups.create(
- security_group['name'], security_group['description'])
-
- set_sg_runtime_properties(sg, nova_client)
-
- try:
- for sgr in sg_rules:
- sgr['parent_group_id'] = sg.id
- nova_client.security_group_rules.create(**sgr)
- except Exception:
- delete_resource_and_runtime_properties(ctx, nova_client,
- RUNTIME_PROPERTIES_KEYS)
- raise
-
-
-@operation
-@with_nova_client
-def delete(nova_client, **kwargs):
- delete_sg(nova_client)
-
-
-@operation
-@with_nova_client
-def creation_validation(nova_client, **kwargs):
- sg_creation_validation(nova_client, 'cidr')
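
For orientation, an illustrative mapping between the nova-net rule fields used here and the Neutron fields used by neutron_plugin/security_group.py; the values are hypothetical and the mapping is a rough correspondence, not an exhaustive translation.

nova_rule = {
    'ip_protocol': 'tcp',
    'from_port': 80,
    'to_port': 80,
    'cidr': '0.0.0.0/0',
    'parent_group_id': 'sg-1234',
}
neutron_rule = {
    'protocol': nova_rule['ip_protocol'],
    'port_range_min': nova_rule['from_port'],
    'port_range_max': nova_rule['to_port'],
    'remote_ip_prefix': nova_rule['cidr'],
    'security_group_id': nova_rule['parent_group_id'],
    'direction': 'ingress',
}
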
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/server.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/server.py
deleted file mode 100644
index 6726f24804..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/server.py
+++ /dev/null
@@ -1,944 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-
-import os
-import time
-import copy
-import operator
-
-from novaclient import exceptions as nova_exceptions
-
-from cloudify import ctx
-from cloudify.manager import get_rest_client
-from cloudify.decorators import operation
-from cloudify.exceptions import NonRecoverableError, RecoverableError
-from cinder_plugin import volume
-from openstack_plugin_common import (
- provider,
- transform_resource_name,
- get_resource_id,
- get_openstack_ids_of_connected_nodes_by_openstack_type,
- with_nova_client,
- with_cinder_client,
- assign_payload_as_runtime_properties,
- get_openstack_id_of_single_connected_node_by_openstack_type,
- get_openstack_names_of_connected_nodes_by_openstack_type,
- get_single_connected_node_by_openstack_type,
- is_external_resource,
- is_external_resource_by_properties,
- is_external_resource_not_conditionally_created,
- is_external_relationship_not_conditionally_created,
- use_external_resource,
- delete_runtime_properties,
- is_external_relationship,
- validate_resource,
- USE_EXTERNAL_RESOURCE_PROPERTY,
- OPENSTACK_AZ_PROPERTY,
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY,
- COMMON_RUNTIME_PROPERTIES_KEYS,
- with_neutron_client)
-from nova_plugin.keypair import KEYPAIR_OPENSTACK_TYPE
-from nova_plugin import userdata
-from openstack_plugin_common.floatingip import (IP_ADDRESS_PROPERTY,
- get_server_floating_ip)
-from neutron_plugin.network import NETWORK_OPENSTACK_TYPE
-from neutron_plugin.port import PORT_OPENSTACK_TYPE
-from cinder_plugin.volume import VOLUME_OPENSTACK_TYPE
-from openstack_plugin_common.security_group import \
- SECURITY_GROUP_OPENSTACK_TYPE
-from glance_plugin.image import handle_image_from_relationship
-
-SERVER_OPENSTACK_TYPE = 'server'
-
-# server status constants. Full lists here: http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html # NOQA
-SERVER_STATUS_ACTIVE = 'ACTIVE'
-SERVER_STATUS_BUILD = 'BUILD'
-SERVER_STATUS_SHUTOFF = 'SHUTOFF'
-
-OS_EXT_STS_TASK_STATE = 'OS-EXT-STS:task_state'
-SERVER_TASK_STATE_POWERING_ON = 'powering-on'
-
-MUST_SPECIFY_NETWORK_EXCEPTION_TEXT = 'More than one possible network found.'
-SERVER_DELETE_CHECK_SLEEP = 2
-
-# Runtime properties
-NETWORKS_PROPERTY = 'networks' # all of the server's ips
-IP_PROPERTY = 'ip' # the server's private ip
-ADMIN_PASSWORD_PROPERTY = 'password' # the server's password
-RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + \
- [NETWORKS_PROPERTY, IP_PROPERTY, ADMIN_PASSWORD_PROPERTY]
-
-
-def _get_management_network_id_and_name(neutron_client, ctx):
- """Examine the context to find the management network id and name."""
- management_network_id = None
- management_network_name = None
- provider_context = provider(ctx)
-
- if ('management_network_name' in ctx.node.properties) and \
- ctx.node.properties['management_network_name']:
- management_network_name = \
- ctx.node.properties['management_network_name']
- management_network_name = transform_resource_name(
- ctx, management_network_name)
- management_network_id = neutron_client.cosmo_get_named(
- 'network', management_network_name)
- management_network_id = management_network_id['id']
- else:
- int_network = provider_context.int_network
- if int_network:
- management_network_id = int_network['id']
- management_network_name = int_network['name'] # Already transform.
-
- return management_network_id, management_network_name
-
-
-def _merge_nics(management_network_id, *nics_sources):
- """Merge nics_sources into a single nics list, insert mgmt network if
- needed.
- nics_sources are lists of networks received from several sources
- (server properties, relationships to networks, relationships to ports).
- Merge them into a single list, and if the management network isn't present
- there, prepend it as the first network.
- """
- merged = []
- for nics in nics_sources:
- merged.extend(nics)
- if management_network_id is not None and \
- not any(nic['net-id'] == management_network_id for nic in merged):
- merged.insert(0, {'net-id': management_network_id})
- return merged
-
-
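# Illustrative sketch (not from the deleted module): a worked example of the merge
# performed by _merge_nics above; all ids are hypothetical.
mgmt = 'mgmt-net-id'
from_properties = [{'net-id': 'app-net-id'}]
from_port_rels = [{'net-id': 'app-net-id', 'port-id': 'port-1'}]

merged = from_properties + from_port_rels
if mgmt is not None and not any(nic['net-id'] == mgmt for nic in merged):
    merged.insert(0, {'net-id': mgmt})
# merged == [{'net-id': 'mgmt-net-id'},
#            {'net-id': 'app-net-id'},
#            {'net-id': 'app-net-id', 'port-id': 'port-1'}]
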
-def _normalize_nics(nics):
- """Transform the NICs passed to the form expected by openstack.
-
- If both net-id and port-id are provided, remove net-id: it is ignored
- by openstack anyway.
- """
- def _normalize(nic):
- if 'port-id' in nic and 'net-id' in nic:
- nic = nic.copy()
- del nic['net-id']
- return nic
- return [_normalize(nic) for nic in nics]
-
-
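# Illustrative example (not from the deleted module) of the normalization above:
# net-id is dropped whenever port-id is present, since OpenStack ignores it anyway.
nics = [{'net-id': 'mgmt-net-id'},
        {'net-id': 'app-net-id', 'port-id': 'port-1'}]
normalized = [{k: v for k, v in nic.items()
               if not ('port-id' in nic and k == 'net-id')}
              for nic in nics]
# normalized == [{'net-id': 'mgmt-net-id'}, {'port-id': 'port-1'}]
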
-def _prepare_server_nics(neutron_client, ctx, server):
- """Update server['nics'] based on declared relationships.
-
- server['nics'] should contain the pre-declared nics, then the networks
- that the server has a declared relationship to, then the networks
- of the ports the server has a relationship to.
-
- If that doesn't include the management network, it should be prepended
- as the first network.
-
- The management network id and name are stored in the server meta properties
- """
- network_ids = get_openstack_ids_of_connected_nodes_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE)
- port_ids = get_openstack_ids_of_connected_nodes_by_openstack_type(
- ctx, PORT_OPENSTACK_TYPE)
- management_network_id, management_network_name = \
- _get_management_network_id_and_name(neutron_client, ctx)
- if management_network_id is None and (network_ids or port_ids):
- # Known limitation
- raise NonRecoverableError(
- "Nova server with NICs requires "
- "'management_network_name' in properties or id "
- "from provider context, which was not supplied")
-
- nics = _merge_nics(
- management_network_id,
- server.get('nics', []),
- [{'net-id': net_id} for net_id in network_ids],
- get_port_networks(neutron_client, port_ids))
-
- nics = _normalize_nics(nics)
-
- server['nics'] = nics
- if management_network_id is not None:
- server['meta']['cloudify_management_network_id'] = \
- management_network_id
- if management_network_name is not None:
- server['meta']['cloudify_management_network_name'] = \
- management_network_name
-
-
-def _get_boot_volume_relationships(type_name, ctx):
- ctx.logger.debug('Instance relationship target instances: {0}'.format(str([
- rel.target.instance.runtime_properties
- for rel in ctx.instance.relationships])))
- targets = [
- rel.target.instance
- for rel in ctx.instance.relationships
- if rel.target.instance.runtime_properties.get(
- OPENSTACK_TYPE_PROPERTY) == type_name and
- rel.target.node.properties.get('boot', False)]
-
- if not targets:
- return None
- elif len(targets) > 1:
- raise NonRecoverableError("2 boot volumes not supported")
- return targets[0]
-
-
-def _handle_boot_volume(server, ctx):
- boot_volume = _get_boot_volume_relationships(VOLUME_OPENSTACK_TYPE, ctx)
- if boot_volume:
- boot_volume_id = boot_volume.runtime_properties[OPENSTACK_ID_PROPERTY]
- ctx.logger.info('boot_volume_id: {0}'.format(boot_volume_id))
- az = boot_volume.runtime_properties[OPENSTACK_AZ_PROPERTY]
- # If a block device mapping already exists we shouldn't overwrite it
- # completely
- bdm = server.setdefault('block_device_mapping', {})
- bdm['vda'] = '{0}:::0'.format(boot_volume_id)
- # Some nova configurations allow cross-az server-volume connections, so
- # we can't treat that as an error.
- if not server.get('availability_zone'):
- server['availability_zone'] = az
-
-
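# Illustrative example (not from the deleted module) of the block-device mapping
# assembled by _handle_boot_volume above; the volume id is hypothetical.
boot_volume_id = 'vol-abc123'
server = {'name': 'example-server'}
server.setdefault('block_device_mapping', {})['vda'] = '{0}:::0'.format(boot_volume_id)
# server == {'name': 'example-server',
#            'block_device_mapping': {'vda': 'vol-abc123:::0'}}
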
-@operation
-@with_nova_client
-@with_neutron_client
-def create(nova_client, neutron_client, args, **kwargs):
- """
- Creates a server. Exposes the parameters mentioned in
- http://docs.openstack.org/developer/python-novaclient/api/novaclient.v1_1
- .servers.html#novaclient.v1_1.servers.ServerManager.create
- """
-
- external_server = use_external_resource(ctx, nova_client,
- SERVER_OPENSTACK_TYPE)
-
- if external_server:
- _set_network_and_ip_runtime_properties(external_server)
- if ctx._local:
- return
- else:
- network_ids = \
- get_openstack_ids_of_connected_nodes_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE)
- port_ids = get_openstack_ids_of_connected_nodes_by_openstack_type(
- ctx, PORT_OPENSTACK_TYPE)
- try:
- _validate_external_server_nics(
- neutron_client,
- network_ids,
- port_ids
- )
- _validate_external_server_keypair(nova_client)
- return
- except Exception:
- delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS)
- raise
-
- provider_context = provider(ctx)
-
- def rename(name):
- return transform_resource_name(ctx, name)
-
- server = {
- 'name': get_resource_id(ctx, SERVER_OPENSTACK_TYPE),
- }
- server.update(copy.deepcopy(ctx.node.properties['server']))
- server.update(copy.deepcopy(args))
-
- _handle_boot_volume(server, ctx)
- handle_image_from_relationship(server, 'image', ctx)
-
- if 'meta' not in server:
- server['meta'] = dict()
-
- transform_resource_name(ctx, server)
-
- ctx.logger.debug(
- "server.create() server before transformations: {0}".format(server))
-
- for key in 'block_device_mapping', 'block_device_mapping_v2':
- if key in server:
- # if there is a connected boot volume, don't require the `image`
- # property.
- # However, python-novaclient requires an `image` input anyway, and
- # checks it for truthiness when deciding whether to pass it along
- # to the API
- if 'image' not in server:
- server['image'] = ctx.node.properties.get('image')
- break
- else:
- _handle_image_or_flavor(server, nova_client, 'image')
- _handle_image_or_flavor(server, nova_client, 'flavor')
-
- if provider_context.agents_security_group:
- security_groups = server.get('security_groups', [])
- asg = provider_context.agents_security_group['name']
- if asg not in security_groups:
- security_groups.append(asg)
- server['security_groups'] = security_groups
- elif not server.get('security_groups', []):
- # Make sure the server is connected to its security groups at CREATE
- # time, so that there is never a window in which a running server is
- # left unprotected.
- security_group_names = \
- get_openstack_names_of_connected_nodes_by_openstack_type(
- ctx,
- SECURITY_GROUP_OPENSTACK_TYPE)
- server['security_groups'] = security_group_names
-
- # server keypair handling
- keypair_id = get_openstack_id_of_single_connected_node_by_openstack_type(
- ctx, KEYPAIR_OPENSTACK_TYPE, True)
-
- if 'key_name' in server:
- if keypair_id:
- raise NonRecoverableError("server can't both have the "
- '"key_name" nested property and be '
- 'connected to a keypair via a '
- 'relationship at the same time')
- server['key_name'] = rename(server['key_name'])
- elif keypair_id:
- server['key_name'] = _get_keypair_name_by_id(nova_client, keypair_id)
- elif provider_context.agents_keypair:
- server['key_name'] = provider_context.agents_keypair['name']
- else:
- server['key_name'] = None
- ctx.logger.info(
- 'server must have a keypair, yet no keypair was connected to the '
- 'server node, the "key_name" nested property '
- "wasn't used, and there is no agent keypair in the provider "
- "context. Agent installation can have issues.")
-
- _fail_on_missing_required_parameters(
- server,
- ('name', 'flavor'),
- 'server')
-
- _prepare_server_nics(neutron_client, ctx, server)
-
- ctx.logger.debug(
- "server.create() server after transformations: {0}".format(server))
-
- userdata.handle_userdata(server)
-
- ctx.logger.info("Creating VM with parameters: {0}".format(str(server)))
- # Store the server dictionary contents in runtime properties
- assign_payload_as_runtime_properties(ctx, SERVER_OPENSTACK_TYPE, server)
- ctx.logger.debug(
-        "Asking Nova to create server. All possible parameters are: {0}"
- .format(','.join(server.keys())))
-
- try:
- s = nova_client.servers.create(**server)
- except nova_exceptions.BadRequest as e:
- if 'Block Device Mapping is Invalid' in str(e):
- return ctx.operation.retry(
- message='Block Device Mapping is not created yet',
- retry_after=30)
- if str(e).startswith(MUST_SPECIFY_NETWORK_EXCEPTION_TEXT):
- raise NonRecoverableError(
-                "Cannot provision server: management_network_name or id"
- " is not specified but there are several networks that the "
- "server can be connected to.")
- raise
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = s.id
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
- SERVER_OPENSTACK_TYPE
- ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = server['name']
-
-
-def get_port_networks(neutron_client, port_ids):
-
- def get_network(port_id):
- port = neutron_client.show_port(port_id)
- return {
- 'net-id': port['port']['network_id'],
- 'port-id': port['port']['id']
- }
-
- return map(get_network, port_ids)
-
-
-@operation
-@with_nova_client
-def start(nova_client, start_retry_interval, private_key_path, **kwargs):
- server = get_server_by_context(nova_client)
-
- if is_external_resource_not_conditionally_created(ctx):
- ctx.logger.info('Validating external server is started')
- if server.status != SERVER_STATUS_ACTIVE:
- raise NonRecoverableError(
- 'Expected external resource server {0} to be in '
- '"{1}" status'.format(server.id, SERVER_STATUS_ACTIVE))
- return
-
- if server.status == SERVER_STATUS_ACTIVE:
- ctx.logger.info('Server is {0}'.format(server.status))
-
- if ctx.node.properties['use_password']:
- private_key = _get_private_key(private_key_path)
- ctx.logger.debug('retrieving password for server')
- password = server.get_password(private_key)
-
- if not password:
- return ctx.operation.retry(
- message='Waiting for server to post generated password',
- retry_after=start_retry_interval)
-
- ctx.instance.runtime_properties[ADMIN_PASSWORD_PROPERTY] = password
- ctx.logger.info('Server has been set with a password')
-
- _set_network_and_ip_runtime_properties(server)
- return
-
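-    # The extended task state reflects transitions still in progress, e.g. a
-    # server that is powering on but not yet ACTIVE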
- server_task_state = getattr(server, OS_EXT_STS_TASK_STATE)
-
- if server.status == SERVER_STATUS_SHUTOFF and \
- server_task_state != SERVER_TASK_STATE_POWERING_ON:
- ctx.logger.info('Server is in {0} status - starting server...'.format(
- SERVER_STATUS_SHUTOFF))
- server.start()
- server_task_state = SERVER_TASK_STATE_POWERING_ON
-
- if server.status == SERVER_STATUS_BUILD or \
- server_task_state == SERVER_TASK_STATE_POWERING_ON:
- return ctx.operation.retry(
- message='Waiting for server to be in {0} state but is in {1}:{2} '
- 'state. Retrying...'.format(SERVER_STATUS_ACTIVE,
- server.status,
- server_task_state),
- retry_after=start_retry_interval)
-
- raise NonRecoverableError(
- 'Unexpected server state {0}:{1}'.format(server.status,
- server_task_state))
-
-
-@operation
-@with_nova_client
-def stop(nova_client, **kwargs):
- """
- Stop server.
-
-    Depending on the OpenStack implementation, server.stop() might not be
-    supported.
- """
- if is_external_resource(ctx):
- ctx.logger.info('Not stopping server since an external server is '
- 'being used')
- return
-
- server = get_server_by_context(nova_client)
-
- if server.status != SERVER_STATUS_SHUTOFF:
- nova_client.servers.stop(server)
- else:
- ctx.logger.info('Server is already stopped')
-
-
-@operation
-@with_nova_client
-def delete(nova_client, **kwargs):
- if not is_external_resource(ctx):
- ctx.logger.info('deleting server')
- server = get_server_by_context(nova_client)
- nova_client.servers.delete(server)
- _wait_for_server_to_be_deleted(nova_client, server)
- else:
- ctx.logger.info('not deleting server since an external server is '
- 'being used')
-
- delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS)
-
-
-def _wait_for_server_to_be_deleted(nova_client,
- server,
- timeout=120,
- sleep_interval=5):
-    deadline = time.time() + timeout
-    while time.time() < deadline:
- try:
- server = nova_client.servers.get(server)
-            ctx.logger.debug('Waiting for server "{}" to be deleted. Current'
-                             ' status: {}'.format(server.id, server.status))
- time.sleep(sleep_interval)
- except nova_exceptions.NotFound:
- return
- # recoverable error
-    raise RuntimeError('Server {} has not been deleted. Waited for {} seconds'
-                       .format(server.id, timeout))
-
-
-def get_server_by_context(nova_client):
- return nova_client.servers.get(
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY])
-
-
-def _set_network_and_ip_runtime_properties(server):
-
- ips = {}
-
- if not server.networks:
- raise NonRecoverableError(
- 'The server was created but not attached to a network. '
- 'Cloudify requires that a server is connected to '
- 'at least one port.'
- )
-
- manager_network_ip = None
- management_network_name = server.metadata.get(
- 'cloudify_management_network_name')
-
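-    # Prefer the first IP of the management network; until that network is
-    # seen, fall back to the first IP found on any network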
- for network, network_ips in server.networks.items():
- if (management_network_name and
- network == management_network_name) or not \
- manager_network_ip:
- manager_network_ip = next(iter(network_ips or []), None)
- ips[network] = network_ips
- ctx.instance.runtime_properties[NETWORKS_PROPERTY] = ips
- # The ip of this instance in the management network
- ctx.instance.runtime_properties[IP_PROPERTY] = manager_network_ip
-
-
-@operation
-@with_nova_client
-def connect_floatingip(nova_client, fixed_ip, **kwargs):
- server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- floating_ip_id = ctx.target.instance.runtime_properties[
- OPENSTACK_ID_PROPERTY]
-
- if is_external_relationship_not_conditionally_created(ctx):
- ctx.logger.info('Validating external floatingip and server '
- 'are associated')
- if nova_client.floating_ips.get(floating_ip_id).instance_id ==\
- server_id:
- return
- raise NonRecoverableError(
- 'Expected external resources server {0} and floating-ip {1} to be '
- 'connected'.format(server_id, floating_ip_id))
-
- floating_ip_address = ctx.target.instance.runtime_properties[
- IP_ADDRESS_PROPERTY]
- server = nova_client.servers.get(server_id)
- server.add_floating_ip(floating_ip_address, fixed_ip or None)
-
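-    # Re-read the server and flatten its IPs across all networks to verify
-    # that the floating ip was actually assigned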
- server = nova_client.servers.get(server_id)
- all_server_ips = reduce(operator.add, server.networks.values())
- if floating_ip_address not in all_server_ips:
- return ctx.operation.retry(message='Failed to assign floating ip {0}'
- ' to machine {1}.'
- .format(floating_ip_address, server_id))
-
-
-@operation
-@with_nova_client
-@with_neutron_client
-def disconnect_floatingip(nova_client, neutron_client, **kwargs):
- if is_external_relationship(ctx):
- ctx.logger.info('Not disassociating floatingip and server since '
- 'external floatingip and server are being used')
- return
-
- server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- ctx.logger.info("Remove floating ip {0}".format(
- ctx.target.instance.runtime_properties[IP_ADDRESS_PROPERTY]))
- server_floating_ip = get_server_floating_ip(neutron_client, server_id)
- if server_floating_ip:
- server = nova_client.servers.get(server_id)
- server.remove_floating_ip(server_floating_ip['floating_ip_address'])
- ctx.logger.info("Floating ip {0} detached from server"
- .format(server_floating_ip['floating_ip_address']))
-
-
-@operation
-@with_nova_client
-def connect_security_group(nova_client, **kwargs):
- server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- security_group_id = ctx.target.instance.runtime_properties[
- OPENSTACK_ID_PROPERTY]
- security_group_name = ctx.target.instance.runtime_properties[
- OPENSTACK_NAME_PROPERTY]
-
- if is_external_relationship_not_conditionally_created(ctx):
- ctx.logger.info('Validating external security group and server '
- 'are associated')
- server = nova_client.servers.get(server_id)
- if [sg for sg in server.list_security_group() if sg.id ==
- security_group_id]:
- return
- raise NonRecoverableError(
- 'Expected external resources server {0} and security-group {1} to '
- 'be connected'.format(server_id, security_group_id))
-
- server = nova_client.servers.get(server_id)
- for security_group in server.list_security_group():
- # Since some security groups are already attached in
- # create this will ensure that they are not attached twice.
- if security_group_id != security_group.id and \
- security_group_name != security_group.name:
- # to support nova security groups as well,
- # we connect the security group by name
- # (as connecting by id
- # doesn't seem to work well for nova SGs)
- server.add_security_group(security_group_name)
-
- _validate_security_group_and_server_connection_status(nova_client,
- server_id,
- security_group_id,
- security_group_name,
- is_connected=True)
-
-
-@operation
-@with_nova_client
-def disconnect_security_group(nova_client, **kwargs):
- if is_external_relationship(ctx):
- ctx.logger.info('Not disconnecting security group and server since '
- 'external security group and server are being used')
- return
-
- server_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- security_group_id = ctx.target.instance.runtime_properties[
- OPENSTACK_ID_PROPERTY]
- security_group_name = ctx.target.instance.runtime_properties[
- OPENSTACK_NAME_PROPERTY]
- server = nova_client.servers.get(server_id)
- # to support nova security groups as well, we disconnect the security group
- # by name (as disconnecting by id doesn't seem to work well for nova SGs)
- server.remove_security_group(security_group_name)
-
- _validate_security_group_and_server_connection_status(nova_client,
- server_id,
- security_group_id,
- security_group_name,
- is_connected=False)
-
-
-@operation
-@with_nova_client
-@with_cinder_client
-def attach_volume(nova_client, cinder_client, status_attempts,
- status_timeout, **kwargs):
- server_id = ctx.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- volume_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
-
- if is_external_relationship_not_conditionally_created(ctx):
- ctx.logger.info('Validating external volume and server '
- 'are connected')
- attachment = volume.get_attachment(cinder_client=cinder_client,
- volume_id=volume_id,
- server_id=server_id)
- if attachment:
- return
- else:
- raise NonRecoverableError(
- 'Expected external resources server {0} and volume {1} to be '
- 'connected'.format(server_id, volume_id))
-
- # Note: The 'device_name' property should actually be a property of the
- # relationship between a server and a volume; It'll move to that
- # relationship type once relationship properties are better supported.
- device = ctx.source.node.properties[volume.DEVICE_NAME_PROPERTY]
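-    # A device of 'auto' is passed as None so that Nova picks the device
-    # name; the actual name is read back from the attachment below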
- nova_client.volumes.create_server_volume(
- server_id,
- volume_id,
- device if device != 'auto' else None)
- try:
- vol, wait_succeeded = volume.wait_until_status(
- cinder_client=cinder_client,
- volume_id=volume_id,
- status=volume.VOLUME_STATUS_IN_USE,
- num_tries=status_attempts,
- timeout=status_timeout
- )
- if not wait_succeeded:
- raise RecoverableError(
- 'Waiting for volume status {0} failed - detaching volume and '
- 'retrying..'.format(volume.VOLUME_STATUS_IN_USE))
- if device == 'auto':
- # The device name was assigned automatically so we
- # query the actual device name
- attachment = volume.get_attachment(
- cinder_client=cinder_client,
- volume_id=volume_id,
- server_id=server_id
- )
- device_name = attachment['device']
- ctx.logger.info('Detected device name for attachment of volume '
- '{0} to server {1}: {2}'
- .format(volume_id, server_id, device_name))
- ctx.source.instance.runtime_properties[
- volume.DEVICE_NAME_PROPERTY] = device_name
-    except Exception as e:
- if not isinstance(e, NonRecoverableError):
- _prepare_attach_volume_to_be_repeated(
- nova_client, cinder_client, server_id, volume_id,
- status_attempts, status_timeout)
- raise
-
-
-def _prepare_attach_volume_to_be_repeated(
- nova_client, cinder_client, server_id, volume_id,
- status_attempts, status_timeout):
-
-    ctx.logger.info('Cleaning up after a failed attach_volume() call')
- try:
- _detach_volume(nova_client, cinder_client, server_id, volume_id,
- status_attempts, status_timeout)
-    except Exception as e:
-        ctx.logger.error('Cleaning up after a failed attach_volume() call '
-                         'failed with a \'{0}\' exception.'.format(e))
-        raise NonRecoverableError(e)
-
-
-def _detach_volume(nova_client, cinder_client, server_id, volume_id,
- status_attempts, status_timeout):
- attachment = volume.get_attachment(cinder_client=cinder_client,
- volume_id=volume_id,
- server_id=server_id)
- if attachment:
- nova_client.volumes.delete_server_volume(server_id, attachment['id'])
- volume.wait_until_status(cinder_client=cinder_client,
- volume_id=volume_id,
- status=volume.VOLUME_STATUS_AVAILABLE,
- num_tries=status_attempts,
- timeout=status_timeout)
-
-
-@operation
-@with_nova_client
-@with_cinder_client
-def detach_volume(nova_client, cinder_client, status_attempts,
- status_timeout, **kwargs):
- if is_external_relationship(ctx):
- ctx.logger.info('Not detaching volume from server since '
- 'external volume and server are being used')
- return
-
- server_id = ctx.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- volume_id = ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
-
- _detach_volume(nova_client, cinder_client, server_id, volume_id,
- status_attempts, status_timeout)
-
-
-def _fail_on_missing_required_parameters(obj, required_parameters, hint_where):
- for k in required_parameters:
- if k not in obj:
- raise NonRecoverableError(
- "Required parameter '{0}' is missing (under host's "
- "properties.{1}). Required parameters are: {2}"
- .format(k, hint_where, required_parameters))
-
-
-def _validate_external_server_keypair(nova_client):
- keypair_id = get_openstack_id_of_single_connected_node_by_openstack_type(
- ctx, KEYPAIR_OPENSTACK_TYPE, True)
- if not keypair_id:
- return
-
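-    # Find the node instance that holds this keypair's OpenStack id so its
-    # node properties can be inspected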
- keypair_instance_id = \
- [node_instance_id for node_instance_id, runtime_props in
- ctx.capabilities.get_all().iteritems() if
- runtime_props.get(OPENSTACK_ID_PROPERTY) == keypair_id][0]
- keypair_node_properties = _get_properties_by_node_instance_id(
- keypair_instance_id)
- if not is_external_resource_by_properties(keypair_node_properties):
- raise NonRecoverableError(
- "Can't connect a new keypair node to a server node "
- "with '{0}'=True".format(USE_EXTERNAL_RESOURCE_PROPERTY))
-
- server = get_server_by_context(nova_client)
- if keypair_id == _get_keypair_name_by_id(nova_client, server.key_name):
- return
- raise NonRecoverableError(
- "Expected external resources server {0} and keypair {1} to be "
- "connected".format(server.id, keypair_id))
-
-
-def _get_keypair_name_by_id(nova_client, key_name):
- keypair = nova_client.cosmo_get_named(KEYPAIR_OPENSTACK_TYPE, key_name)
- return keypair.id
-
-
-def _validate_external_server_nics(neutron_client, network_ids, port_ids):
- # validate no new nics are being assigned to an existing server (which
- # isn't possible on Openstack)
- new_nic_nodes = \
- [node_instance_id for node_instance_id, runtime_props in
- ctx.capabilities.get_all().iteritems() if runtime_props.get(
- OPENSTACK_TYPE_PROPERTY) in (PORT_OPENSTACK_TYPE,
- NETWORK_OPENSTACK_TYPE) and
- not is_external_resource_by_properties(
- _get_properties_by_node_instance_id(node_instance_id))]
- if new_nic_nodes:
- raise NonRecoverableError(
- "Can't connect new port and/or network nodes to a server node "
- "with '{0}'=True".format(USE_EXTERNAL_RESOURCE_PROPERTY))
-
- # validate all expected connected networks and ports are indeed already
- # connected to the server. note that additional networks (e.g. the
- # management network) may be connected as well with no error raised
- if not network_ids and not port_ids:
- return
-
- server_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- connected_ports = neutron_client.list_ports(device_id=server_id)['ports']
-
-    # not counting networks connected via an explicitly connected port, since
-    # that network connection is expected to be on a separate port
- connected_ports_networks = {port['network_id'] for port in
- connected_ports if port['id'] not in port_ids}
- connected_ports_ids = {port['id'] for port in
- connected_ports}
- disconnected_networks = [network_id for network_id in network_ids if
- network_id not in connected_ports_networks]
- disconnected_ports = [port_id for port_id in port_ids if port_id not
- in connected_ports_ids]
- if disconnected_networks or disconnected_ports:
- raise NonRecoverableError(
- 'Expected external resources to be connected to external server {'
- '0}: Networks - {1}; Ports - {2}'.format(server_id,
- disconnected_networks,
- disconnected_ports))
-
-
-def _get_properties_by_node_instance_id(node_instance_id):
- client = get_rest_client()
- node_instance = client.node_instances.get(node_instance_id)
- node = client.nodes.get(ctx.deployment.id, node_instance.node_id)
- return node.properties
-
-
-@operation
-@with_nova_client
-def creation_validation(nova_client, args, **kwargs):
-
- def validate_server_property_value_exists(server_props, property_name):
- ctx.logger.debug(
- 'checking whether {0} exists...'.format(property_name))
-
- serv_props_copy = server_props.copy()
- try:
- handle_image_from_relationship(serv_props_copy, 'image', ctx)
- _handle_image_or_flavor(serv_props_copy, nova_client,
- property_name)
- except (NonRecoverableError, nova_exceptions.NotFound) as e:
- # temporary error - once image/flavor_name get removed, these
- # errors won't be relevant anymore
- err = str(e)
- ctx.logger.error('VALIDATION ERROR: ' + err)
- raise NonRecoverableError(err)
-
- prop_value_id = str(serv_props_copy[property_name])
- prop_values = list(nova_client.cosmo_list(property_name))
- for f in prop_values:
- if prop_value_id == f.id:
- ctx.logger.debug('OK: {0} exists'.format(property_name))
- return
- err = '{0} {1} does not exist'.format(property_name, prop_value_id)
- ctx.logger.error('VALIDATION ERROR: ' + err)
- if prop_values:
- ctx.logger.info('list of available {0}s:'.format(property_name))
- for f in prop_values:
- ctx.logger.info(' {0:>10} - {1}'.format(f.id, f.name))
- else:
- ctx.logger.info('there are no available {0}s'.format(
- property_name))
- raise NonRecoverableError(err)
-
- validate_resource(ctx, nova_client, SERVER_OPENSTACK_TYPE)
-
- server_props = dict(ctx.node.properties['server'], **args)
- validate_server_property_value_exists(server_props, 'flavor')
-
-
-def _get_private_key(private_key_path):
- pk_node_by_rel = \
- get_single_connected_node_by_openstack_type(
- ctx, KEYPAIR_OPENSTACK_TYPE, True)
-
- if private_key_path:
- if pk_node_by_rel:
- raise NonRecoverableError("server can't both have a "
- '"private_key_path" input and be '
- 'connected to a keypair via a '
- 'relationship at the same time')
- key_path = private_key_path
- else:
- if pk_node_by_rel and pk_node_by_rel.properties['private_key_path']:
- key_path = pk_node_by_rel.properties['private_key_path']
- else:
- key_path = ctx.bootstrap_context.cloudify_agent.agent_key_path
-
- if key_path:
- key_path = os.path.expanduser(key_path)
- if os.path.isfile(key_path):
- return key_path
-
- err_message = 'Cannot find private key file'
- if key_path:
- err_message += '; expected file path was {0}'.format(key_path)
- raise NonRecoverableError(err_message)
-
-
-def _validate_security_group_and_server_connection_status(
- nova_client, server_id, sg_id, sg_name, is_connected):
-
- # verifying the security group got connected or disconnected
- # successfully - this is due to Openstack concurrency issues that may
- # take place when attempting to connect/disconnect multiple SGs to the
- # same server at the same time
- server = nova_client.servers.get(server_id)
-
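-    # XOR: the actual state differs from the expected one, so raise a
-    # recoverable error and let the operation retry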
- if is_connected ^ any(sg for sg in server.list_security_group() if
- sg.id == sg_id):
- raise RecoverableError(
- message='Security group {0} did not get {2} server {1} '
- 'properly'
- .format(
- sg_name,
- server.name,
- 'connected to' if is_connected else 'disconnected from'))
-
-
-def _handle_image_or_flavor(server, nova_client, prop_name):
- if prop_name not in server and '{0}_name'.format(prop_name) not in server:
- # setting image or flavor - looking it up by name; if not found, then
- # the value is assumed to be the id
- server[prop_name] = ctx.node.properties[prop_name]
-
- # temporary error message: once the 'image' and 'flavor' properties
- # become mandatory, this will become less relevant
- if not server[prop_name]:
- raise NonRecoverableError(
- 'must set {0} by either setting a "{0}" property or by setting'
- ' a "{0}" or "{0}_name" (deprecated) field under the "server" '
- 'property'.format(prop_name))
-
- image_or_flavor = \
- nova_client.cosmo_get_if_exists(prop_name, name=server[prop_name])
- if image_or_flavor:
- server[prop_name] = image_or_flavor.id
- else: # Deprecated sugar
- if '{0}_name'.format(prop_name) in server:
- prop_name_plural = nova_client.cosmo_plural(prop_name)
- server[prop_name] = \
- getattr(nova_client, prop_name_plural).find(
- name=server['{0}_name'.format(prop_name)]).id
- del server['{0}_name'.format(prop_name)]
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/__init__.py
+++ /dev/null
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-keypair-validation-blueprint.yaml b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-keypair-validation-blueprint.yaml
deleted file mode 100644
index 22b7fb5362..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-keypair-validation-blueprint.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-tosca_definitions_version: cloudify_dsl_1_3
-
-imports:
- - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
- - plugin.yaml
-
-inputs:
- private_key: {}
- is_keypair_external: {}
-
-
-node_templates:
-
- keypair:
- type: cloudify.openstack.nodes.KeyPair
- properties:
- private_key_path: { get_input: private_key }
- use_external_resource: { get_input: is_keypair_external }
- openstack_config:
- username: aaa
- password: aaa
- tenant_name: aaa
- auth_url: aaa
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-server-create-secgroup.yaml b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-server-create-secgroup.yaml
deleted file mode 100644
index 70b75f6bf5..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-server-create-secgroup.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-tosca_definitions_version: cloudify_dsl_1_3
-
-imports:
- - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
- - plugin.yaml
-
-inputs:
- use_password:
- type: boolean
- default: false
-
-node_templates:
-
- security_group:
- type: cloudify.openstack.nodes.SecurityGroup
-
- server:
- type: cloudify.openstack.nodes.Server
- properties:
- install_agent: false
- use_password: { get_input: use_password }
- openstack_config:
- username: aaa
- password: aaa
- tenant_name: aaa
- auth_url: aaa
- server:
- key_name: 'aa'
- relationships:
- - type: cloudify.openstack.server_connected_to_security_group
- target: security_group
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-start-operation-retry-blueprint.yaml b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-start-operation-retry-blueprint.yaml
deleted file mode 100644
index 275806cf5a..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/resources/test-start-operation-retry-blueprint.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-tosca_definitions_version: cloudify_dsl_1_3
-
-imports:
- - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
- - plugin.yaml
-
-inputs:
- use_password:
- type: boolean
- default: false
-
-node_templates:
- server:
- type: cloudify.openstack.nodes.Server
- properties:
- install_agent: false
- use_password: { get_input: use_password }
- server:
- key_name: key
- scheduler_hints:
- group: affinity-group-id
- openstack_config:
- username: aaa
- password: aaa
- tenant_name: aaa
- auth_url: aaa
- interfaces:
- cloudify.interfaces.lifecycle:
- start:
- inputs:
- start_retry_interval: 1
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_relationships.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_relationships.py
deleted file mode 100644
index 2814057fb7..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_relationships.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#########
-# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-"""Test the functions related to retrieving relationship information.
-
-Functions under test are mostly inside openstack_plugin_common:
-get_relationships_by_openstack_type
-get_connected_nodes_by_openstack_type
-get_openstack_ids_of_connected_nodes_by_openstack_type
-get_single_connected_node_by_openstack_type
-"""
-
-import uuid
-from unittest import TestCase
-
-from neutron_plugin.network import NETWORK_OPENSTACK_TYPE
-
-from cloudify.exceptions import NonRecoverableError
-
-from cloudify.mocks import (
- MockCloudifyContext,
- MockNodeContext,
- MockNodeInstanceContext,
- MockRelationshipContext,
- MockRelationshipSubjectContext,
-)
-from openstack_plugin_common import (
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- get_openstack_id_of_single_connected_node_by_openstack_type,
- get_openstack_ids_of_connected_nodes_by_openstack_type,
- get_relationships_by_openstack_type,
- get_single_connected_node_by_openstack_type,
-)
-
-
-class RelationshipsTestBase(TestCase):
- def _make_vm_ctx_with_relationships(self, rel_specs, properties=None):
- """Prepare a mock CloudifyContext from the given relationship spec.
-
- rel_specs is an ordered collection of relationship specs - dicts
- with the keys "node" and "instance" used to construct the
- MockNodeContext and the MockNodeInstanceContext, and optionally a
- "type" key.
- Examples: [
- {},
- {"node": {"id": 5}},
- {
- "type": "some_type",
- "instance": {
- "id": 3,
- "runtime_properties":{}
- }
- }
- ]
- """
- if properties is None:
- properties = {}
- relationships = []
- for rel_spec in rel_specs:
- node = rel_spec.get('node', {})
- node_id = node.pop('id', uuid.uuid4().hex)
-
- instance = rel_spec.get('instance', {})
- instance_id = instance.pop('id', '{0}_{1}'.format(
- node_id, uuid.uuid4().hex))
- if 'properties' not in node:
- node['properties'] = {}
- node_ctx = MockNodeContext(id=node_id, **node)
- instance_ctx = MockNodeInstanceContext(id=instance_id, **instance)
-
- rel_subject_ctx = MockRelationshipSubjectContext(
- node=node_ctx, instance=instance_ctx)
- rel_type = rel_spec.get('type')
- rel_ctx = MockRelationshipContext(target=rel_subject_ctx,
- type=rel_type)
- relationships.append(rel_ctx)
- return MockCloudifyContext(node_id='vm', properties=properties,
- relationships=relationships)
-
-
-class TestGettingRelatedResources(RelationshipsTestBase):
-
- def test_get_relationships_finds_all_by_type(self):
- """get_relationships_by_openstack_type returns all rels that match."""
- rel_specs = [{
- 'instance': {
- 'id': instance_id,
- 'runtime_properties': {
- OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE
- }
- }
- } for instance_id in range(3)]
-
- rel_specs.append({
- 'instance': {
- 'runtime_properties': {
- OPENSTACK_TYPE_PROPERTY: 'something else'
- }
- }
- })
-
- ctx = self._make_vm_ctx_with_relationships(rel_specs)
- filtered = get_relationships_by_openstack_type(ctx,
- NETWORK_OPENSTACK_TYPE)
- self.assertEqual(3, len(filtered))
-
- def test_get_ids_of_nodes_by_type(self):
-
- rel_spec = {
- 'instance': {
- 'runtime_properties': {
- OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE,
- OPENSTACK_ID_PROPERTY: 'the node id'
- }
- }
- }
- ctx = self._make_vm_ctx_with_relationships([rel_spec])
- ids = get_openstack_ids_of_connected_nodes_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE)
- self.assertEqual(['the node id'], ids)
-
-
-class TestGetSingleByID(RelationshipsTestBase):
- def _make_instances(self, ids):
- """Mock a context with relationships to instances with given ids."""
- rel_specs = [{
- 'node': {
- 'id': node_id
- },
- 'instance': {
- 'runtime_properties': {
- OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE,
- OPENSTACK_ID_PROPERTY: node_id
- }
- }
- } for node_id in ids]
- return self._make_vm_ctx_with_relationships(rel_specs)
-
- def test_get_single_id(self):
- ctx = self._make_instances(['the node id'])
- found_id = get_openstack_id_of_single_connected_node_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE)
- self.assertEqual('the node id', found_id)
-
- def test_get_single_id_two_found(self):
- ctx = self._make_instances([0, 1])
- self.assertRaises(
- NonRecoverableError,
- get_openstack_id_of_single_connected_node_by_openstack_type, ctx,
- NETWORK_OPENSTACK_TYPE)
-
- def test_get_single_id_two_found_if_exists_true(self):
- ctx = self._make_instances([0, 1])
-
- try:
- get_openstack_id_of_single_connected_node_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE, if_exists=True)
- except NonRecoverableError as e:
- self.assertIn(NETWORK_OPENSTACK_TYPE, e.message)
- else:
- self.fail()
-
- def test_get_single_id_if_exists_none_found(self):
- ctx = self._make_instances([])
- found = get_openstack_id_of_single_connected_node_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE, if_exists=True)
- self.assertIsNone(found)
-
- def test_get_single_id_none_found(self):
- rel_spec = []
- ctx = self._make_vm_ctx_with_relationships(rel_spec)
- self.assertRaises(
- NonRecoverableError,
- get_openstack_id_of_single_connected_node_by_openstack_type,
- ctx,
- NETWORK_OPENSTACK_TYPE)
-
- def test_get_single_node(self):
- ctx = self._make_instances(['the node id'])
- found_node = get_single_connected_node_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE)
- self.assertEqual('the node id', found_node.id)
-
- def test_get_single_node_two_found(self):
- ctx = self._make_instances([0, 1])
- self.assertRaises(
- NonRecoverableError,
- get_single_connected_node_by_openstack_type,
- ctx, NETWORK_OPENSTACK_TYPE)
-
- def test_get_single_node_two_found_if_exists(self):
- ctx = self._make_instances([0, 1])
-
- self.assertRaises(
- NonRecoverableError,
- get_single_connected_node_by_openstack_type,
- ctx,
- NETWORK_OPENSTACK_TYPE,
- if_exists=True)
-
- def test_get_single_node_if_exists_none_found(self):
- ctx = self._make_instances([])
-
- found = get_single_connected_node_by_openstack_type(
- ctx, NETWORK_OPENSTACK_TYPE, if_exists=True)
- self.assertIsNone(found)
-
- def test_get_single_node_none_found(self):
- ctx = self._make_instances([])
-
- self.assertRaises(
- NonRecoverableError,
- get_single_connected_node_by_openstack_type,
- ctx,
- NETWORK_OPENSTACK_TYPE)
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_server.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_server.py
deleted file mode 100644
index a50930555c..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_server.py
+++ /dev/null
@@ -1,551 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from os import path
-import tempfile
-
-import unittest
-import mock
-
-import nova_plugin
-from cloudify.test_utils import workflow_test
-
-from openstack_plugin_common import NeutronClientWithSugar, \
- OPENSTACK_TYPE_PROPERTY, OPENSTACK_ID_PROPERTY
-from neutron_plugin.network import NETWORK_OPENSTACK_TYPE
-from neutron_plugin.port import PORT_OPENSTACK_TYPE
-from nova_plugin.tests.test_relationships import RelationshipsTestBase
-from nova_plugin.server import _prepare_server_nics
-from cinder_plugin.volume import VOLUME_OPENSTACK_TYPE
-from cloudify.exceptions import NonRecoverableError
-from cloudify.state import current_ctx
-
-from cloudify.utils import setup_logger
-
-from cloudify.mocks import (
- MockNodeContext,
- MockCloudifyContext,
- MockNodeInstanceContext,
- MockRelationshipContext,
- MockRelationshipSubjectContext
-)
-
-
-class TestServer(unittest.TestCase):
-
- blueprint_path = path.join('resources',
- 'test-start-operation-retry-blueprint.yaml')
-
- @mock.patch('nova_plugin.server.create')
- @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties')
- @workflow_test(blueprint_path, copy_plugin_yaml=True)
- def test_nova_server_lifecycle_start(self, cfy_local, *_):
-
- test_vars = {
- 'counter': 0,
- 'server': mock.MagicMock()
- }
-
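-        # The first call reports BUILD so the start operation retries; later
-        # calls report ACTIVE and the retry succeeds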
- def mock_get_server_by_context(*_):
- s = test_vars['server']
- if test_vars['counter'] == 0:
- s.status = nova_plugin.server.SERVER_STATUS_BUILD
- else:
- s.status = nova_plugin.server.SERVER_STATUS_ACTIVE
- test_vars['counter'] += 1
- return s
-
- with mock.patch('nova_plugin.server.get_server_by_context',
- new=mock_get_server_by_context):
- cfy_local.execute('install', task_retries=3)
-
- self.assertEqual(2, test_vars['counter'])
- self.assertEqual(0, test_vars['server'].start.call_count)
-
- @workflow_test(blueprint_path, copy_plugin_yaml=True)
- @mock.patch('nova_plugin.server.create')
- @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties')
- def test_nova_server_lifecycle_start_after_stop(self, cfy_local, *_):
-
- test_vars = {
- 'counter': 0,
- 'server': mock.MagicMock()
- }
-
- def mock_get_server_by_context(_):
- s = test_vars['server']
- if test_vars['counter'] == 0:
- s.status = nova_plugin.server.SERVER_STATUS_SHUTOFF
- elif test_vars['counter'] == 1:
- setattr(s,
- nova_plugin.server.OS_EXT_STS_TASK_STATE,
- nova_plugin.server.SERVER_TASK_STATE_POWERING_ON)
- else:
- s.status = nova_plugin.server.SERVER_STATUS_ACTIVE
- test_vars['counter'] += 1
- test_vars['server'] = s
- return s
-
- with mock.patch('nova_plugin.server.get_server_by_context',
- new=mock_get_server_by_context):
- cfy_local.execute('install', task_retries=3)
-
- self.assertEqual(1, test_vars['server'].start.call_count)
- self.assertEqual(3, test_vars['counter'])
-
- @workflow_test(blueprint_path, copy_plugin_yaml=True)
- @mock.patch('nova_plugin.server.create')
- @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties')
- def test_nova_server_lifecycle_start_unknown_status(self, cfy_local, *_):
- test_vars = {
- 'counter': 0,
- 'server': mock.MagicMock()
- }
-
- def mock_get_server_by_context(_):
- s = test_vars['server']
- if test_vars['counter'] == 0:
- s.status = '### unknown-status ###'
- test_vars['counter'] += 1
- test_vars['server'] = s
- return s
-
- with mock.patch('nova_plugin.server.get_server_by_context',
- new=mock_get_server_by_context):
- self.assertRaisesRegexp(RuntimeError,
- 'Unexpected server state',
- cfy_local.execute,
- 'install')
-
- self.assertEqual(0, test_vars['server'].start.call_count)
- self.assertEqual(1, test_vars['counter'])
-
- @workflow_test(blueprint_path, copy_plugin_yaml=True)
- @mock.patch('nova_plugin.server.start')
- @mock.patch('nova_plugin.server._handle_image_or_flavor')
- @mock.patch('nova_plugin.server._fail_on_missing_required_parameters')
- @mock.patch('openstack_plugin_common.nova_client')
- def test_nova_server_creation_param_integrity(
- self, cfy_local, mock_nova, *args):
- cfy_local.execute('install', task_retries=0)
- calls = mock_nova.Client.return_value.servers.method_calls
- self.assertEqual(1, len(calls))
- kws = calls[0][2]
- self.assertIn('scheduler_hints', kws)
- self.assertEqual(kws['scheduler_hints'],
- {'group': 'affinity-group-id'},
- 'expecting \'scheduler_hints\' value to exist')
-
- @workflow_test(blueprint_path, copy_plugin_yaml=True,
- inputs={'use_password': True})
- @mock.patch('nova_plugin.server.create')
- @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties')
- @mock.patch(
- 'nova_plugin.server.get_single_connected_node_by_openstack_type',
- autospec=True, return_value=None)
- def test_nova_server_with_use_password(self, cfy_local, *_):
-
- test_vars = {
- 'counter': 0,
- 'server': mock.MagicMock()
- }
-
- tmp_path = tempfile.NamedTemporaryFile(prefix='key_name')
- key_path = tmp_path.name
-
- def mock_get_server_by_context(_):
- s = test_vars['server']
- if test_vars['counter'] == 0:
- s.status = nova_plugin.server.SERVER_STATUS_BUILD
- else:
- s.status = nova_plugin.server.SERVER_STATUS_ACTIVE
- test_vars['counter'] += 1
-
- def check_agent_key_path(private_key):
- self.assertEqual(private_key, key_path)
- return private_key
-
- s.get_password = check_agent_key_path
- return s
-
- with mock.patch('nova_plugin.server.get_server_by_context',
- mock_get_server_by_context):
- with mock.patch(
- 'cloudify.context.BootstrapContext.'
- 'CloudifyAgent.agent_key_path',
- new_callable=mock.PropertyMock, return_value=key_path):
- cfy_local.execute('install', task_retries=5)
-
-
-class TestMergeNICs(unittest.TestCase):
- def test_merge_prepends_management_network(self):
-        """When the mgmt network isn't in a relationship, it's the 1st NIC."""
- mgmt_network_id = 'management network'
- nics = [{'net-id': 'other network'}]
-
- merged = nova_plugin.server._merge_nics(mgmt_network_id, nics)
-
- self.assertEqual(len(merged), 2)
- self.assertEqual(merged[0]['net-id'], 'management network')
-
- def test_management_network_in_relationships(self):
- """When the mgmt network was in a relationship, it's not prepended."""
- mgmt_network_id = 'management network'
- nics = [{'net-id': 'other network'}, {'net-id': 'management network'}]
-
- merged = nova_plugin.server._merge_nics(mgmt_network_id, nics)
-
- self.assertEqual(nics, merged)
-
-
-class TestNormalizeNICs(unittest.TestCase):
- def test_normalize_port_priority(self):
-        """When there's both net-id and port-id, port-id is used."""
- nics = [{'net-id': '1'}, {'port-id': '2'}, {'net-id': 3, 'port-id': 4}]
- normalized = nova_plugin.server._normalize_nics(nics)
- expected = [{'net-id': '1'}, {'port-id': '2'}, {'port-id': 4}]
- self.assertEqual(expected, normalized)
-
-
-class MockNeutronClient(NeutronClientWithSugar):
- """A fake neutron client with hard-coded test data."""
-
- @mock.patch('openstack_plugin_common.OpenStackClient.__init__',
- new=mock.Mock())
- def __init__(self):
- super(MockNeutronClient, self).__init__()
-
- @staticmethod
- def _search_filter(objs, search_params):
-        """Mock neutron's filtering by attributes in list_* methods
-        (list_networks, list_ports).
-        """
- def _matches(obj, search_params):
- return all(obj[k] == v for k, v in search_params.items())
- return [obj for obj in objs if _matches(obj, search_params)]
-
- def list_networks(self, **search_params):
- networks = [
- {'name': 'network1', 'id': '1'},
- {'name': 'network2', 'id': '2'},
- {'name': 'network3', 'id': '3'},
- {'name': 'network4', 'id': '4'},
- {'name': 'network5', 'id': '5'},
- {'name': 'network6', 'id': '6'},
- {'name': 'other', 'id': 'other'}
- ]
- return {'networks': self._search_filter(networks, search_params)}
-
- def list_ports(self, **search_params):
- ports = [
- {'name': 'port1', 'id': '1', 'network_id': '1'},
- {'name': 'port2', 'id': '2', 'network_id': '1'},
- {'name': 'port3', 'id': '3', 'network_id': '2'},
- {'name': 'port4', 'id': '4', 'network_id': '2'},
- ]
- return {'ports': self._search_filter(ports, search_params)}
-
- def show_port(self, port_id):
- ports = self.list_ports(id=port_id)
- return {'port': ports['ports'][0]}
-
-
-class NICTestBase(RelationshipsTestBase):
- """Base test class for the NICs tests.
-
- It comes with helper methods to create a mock cloudify context, with
- the specified relationships.
- """
- mock_neutron = MockNeutronClient()
-
- def _relationship_spec(self, obj, objtype):
- return {'node': {'properties': obj},
- 'instance': {
- 'runtime_properties': {OPENSTACK_TYPE_PROPERTY: objtype,
- OPENSTACK_ID_PROPERTY: obj['id']}}}
-
- def _make_vm_ctx_with_ports(self, management_network_name, ports):
- port_specs = [self._relationship_spec(obj, PORT_OPENSTACK_TYPE)
- for obj in ports]
- vm_properties = {'management_network_name': management_network_name}
- return self._make_vm_ctx_with_relationships(port_specs,
- vm_properties)
-
- def _make_vm_ctx_with_networks(self, management_network_name, networks):
- network_specs = [self._relationship_spec(obj, NETWORK_OPENSTACK_TYPE)
- for obj in networks]
- vm_properties = {'management_network_name': management_network_name}
- return self._make_vm_ctx_with_relationships(network_specs,
- vm_properties)
-
-
-class TestServerNICs(NICTestBase):
- """Test preparing the NICs list from server<->network relationships.
-
-    Each test creates a cloudify context that represents an openstack VM
-    with relationships to networks, then examines the NICs list produced
-    from the relationships.
- """
- def test_nova_server_creation_nics_ordering(self):
- """NIC list keeps the order of the relationships.
-
- The nics= list passed to nova.server.create should be ordered
- depending on the relationships to the networks (as defined in the
- blueprint).
- """
- ctx = self._make_vm_ctx_with_networks(
- management_network_name='network1',
- networks=[
- {'id': '1'},
- {'id': '2'},
- {'id': '3'},
- {'id': '4'},
- {'id': '5'},
- {'id': '6'},
- ])
- server = {'meta': {}}
-
- _prepare_server_nics(
- self.mock_neutron, ctx, server)
-
- self.assertEqual(
- ['1', '2', '3', '4', '5', '6'],
- [n['net-id'] for n in server['nics']])
-
- def test_server_creation_prepends_mgmt_network(self):
- """If the management network isn't in a relation, it's the first NIC.
-
- Creating the server examines the relationships, and if it doesn't find
- a relationship to the management network, it adds the management
- network to the NICs list, as the first element.
- """
- ctx = self._make_vm_ctx_with_networks(
- management_network_name='other',
- networks=[
- {'id': '1'},
- {'id': '2'},
- {'id': '3'},
- {'id': '4'},
- {'id': '5'},
- {'id': '6'},
- ])
- server = {'meta': {}}
-
- _prepare_server_nics(
- self.mock_neutron, ctx, server)
-
- first_nic = server['nics'][0]
- self.assertEqual('other', first_nic['net-id'])
- self.assertEqual(7, len(server['nics']))
-
- def test_server_creation_uses_relation_mgmt_nic(self):
- """If the management network is in a relation, it isn't prepended.
-
- If the server has a relationship to the management network,
- a new NIC isn't prepended to the list.
- """
- ctx = self._make_vm_ctx_with_networks(
- management_network_name='network1',
- networks=[
- {'id': '1'},
- {'id': '2'},
- {'id': '3'},
- {'id': '4'},
- {'id': '5'},
- {'id': '6'},
- ])
- server = {'meta': {}}
-
- _prepare_server_nics(
- self.mock_neutron, ctx, server)
- self.assertEqual(6, len(server['nics']))
-
-
-class TestServerPortNICs(NICTestBase):
- """Test preparing the NICs list from server<->port relationships.
-
- Create a cloudify ctx representing a vm with relationships to
- openstack ports. Then examine the resulting NICs list: check that it
- contains the networks that the ports were connected to, and that each
- connection uses the port that was provided.
- """
-
- def test_network_with_port(self):
- """Port on the management network is used to connect to it.
-
- The NICs list entry for the management network contains the
- port-id of the port from the relationship, but doesn't contain net-id.
- """
- ports = [{'id': '1'}]
- ctx = self._make_vm_ctx_with_ports('network1', ports)
- server = {'meta': {}}
-
- _prepare_server_nics(
- self.mock_neutron, ctx, server)
-
- self.assertEqual([{'port-id': '1'}], server['nics'])
-
- def test_port_not_to_mgmt_network(self):
- """A NICs list entry is added with the network and the port.
-
- A relationship to a port must not only add a NIC, but the NIC must
- also make sure to use that port.
- """
- ports = [{'id': '1'}]
- ctx = self._make_vm_ctx_with_ports('other', ports)
- server = {'meta': {}}
-
- _prepare_server_nics(
- self.mock_neutron, ctx, server)
- expected = [
- {'net-id': 'other'},
- {'port-id': '1'}
- ]
- self.assertEqual(expected, server['nics'])
-
-
-class TestBootFromVolume(unittest.TestCase):
-
- @mock.patch('nova_plugin.server._get_boot_volume_relationships',
- autospec=True)
- def test_handle_boot_volume(self, mock_get_rels):
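-        # The mocked boot-volume relationship supplies the volume id and
-        # availability zone that _handle_boot_volume writes into the server
-        # dict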
- mock_get_rels.return_value.runtime_properties = {
- 'external_id': 'test-id',
- 'availability_zone': 'test-az',
- }
- server = {}
- ctx = mock.MagicMock()
- nova_plugin.server._handle_boot_volume(server, ctx)
- self.assertEqual({'vda': 'test-id:::0'},
- server['block_device_mapping'])
- self.assertEqual('test-az',
- server['availability_zone'])
-
- @mock.patch('nova_plugin.server._get_boot_volume_relationships',
- autospec=True, return_value=[])
- def test_handle_boot_volume_no_boot_volume(self, *_):
- server = {}
- ctx = mock.MagicMock()
- nova_plugin.server._handle_boot_volume(server, ctx)
- self.assertNotIn('block_device_mapping', server)
-
-
-class TestImageFromRelationships(unittest.TestCase):
-
- @mock.patch('glance_plugin.image.'
- 'get_openstack_ids_of_connected_nodes_by_openstack_type',
- autospec=True, return_value=['test-id'])
- def test_handle_boot_image(self, *_):
- server = {}
- ctx = mock.MagicMock()
- nova_plugin.server.handle_image_from_relationship(server, 'image', ctx)
- self.assertEqual({'image': 'test-id'}, server)
-
- @mock.patch('glance_plugin.image.'
- 'get_openstack_ids_of_connected_nodes_by_openstack_type',
- autospec=True, return_value=[])
- def test_handle_boot_image_no_image(self, *_):
- server = {}
- ctx = mock.MagicMock()
- nova_plugin.server.handle_image_from_relationship(server, 'image', ctx)
- self.assertNotIn('image', server)
-
-
-class TestServerRelationships(unittest.TestCase):
-
- def _get_ctx_mock(self, instance_id, boot):
- rel_specs = [MockRelationshipContext(
- target=MockRelationshipSubjectContext(node=MockNodeContext(
- properties={'boot': boot}), instance=MockNodeInstanceContext(
- runtime_properties={
- OPENSTACK_TYPE_PROPERTY: VOLUME_OPENSTACK_TYPE,
- OPENSTACK_ID_PROPERTY: instance_id
- })))]
- ctx = mock.MagicMock()
- ctx.instance = MockNodeInstanceContext(relationships=rel_specs)
- ctx.logger = setup_logger('mock-logger')
- return ctx
-
- def test_boot_volume_relationship(self):
- instance_id = 'test-id'
- ctx = self._get_ctx_mock(instance_id, True)
- result = nova_plugin.server._get_boot_volume_relationships(
- VOLUME_OPENSTACK_TYPE, ctx)
- self.assertEqual(
- instance_id,
- result.runtime_properties['external_id'])
-
- def test_no_boot_volume_relationship(self):
- instance_id = 'test-id'
- ctx = self._get_ctx_mock(instance_id, False)
- result = nova_plugin.server._get_boot_volume_relationships(
- VOLUME_OPENSTACK_TYPE, ctx)
- self.assertFalse(result)
-
-
-class TestServerNetworkRuntimeProperties(unittest.TestCase):
-
- @property
- def mock_ctx(self):
- return MockCloudifyContext(
- node_id='test',
- deployment_id='test',
- properties={},
- operation={'retry_number': 0},
- provider_context={'resources': {}}
- )
-
- def test_server_networks_runtime_properties_empty_server(self):
- ctx = self.mock_ctx
- current_ctx.set(ctx=ctx)
- server = mock.MagicMock()
- setattr(server, 'networks', {})
- with self.assertRaisesRegexp(
- NonRecoverableError,
- 'The server was created but not attached to a network.'):
- nova_plugin.server._set_network_and_ip_runtime_properties(server)
-
- def test_server_networks_runtime_properties_valid_networks(self):
- ctx = self.mock_ctx
- current_ctx.set(ctx=ctx)
- server = mock.MagicMock()
- network_id = 'management_network'
- network_ips = ['good', 'bad1', 'bad2']
- setattr(server,
- 'networks',
- {network_id: network_ips})
- nova_plugin.server._set_network_and_ip_runtime_properties(server)
- self.assertIn('networks', ctx.instance.runtime_properties.keys())
- self.assertIn('ip', ctx.instance.runtime_properties.keys())
- self.assertEquals(ctx.instance.runtime_properties['ip'], 'good')
- self.assertEquals(ctx.instance.runtime_properties['networks'],
- {network_id: network_ips})
-
- def test_server_networks_runtime_properties_empty_networks(self):
- ctx = self.mock_ctx
- current_ctx.set(ctx=ctx)
- server = mock.MagicMock()
- network_id = 'management_network'
- network_ips = []
- setattr(server,
- 'networks',
- {network_id: network_ips})
- nova_plugin.server._set_network_and_ip_runtime_properties(server)
- self.assertIn('networks', ctx.instance.runtime_properties.keys())
- self.assertIn('ip', ctx.instance.runtime_properties.keys())
- self.assertEquals(ctx.instance.runtime_properties['ip'], None)
- self.assertEquals(ctx.instance.runtime_properties['networks'],
- {network_id: network_ips})
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_server_image_and_flavor.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_server_image_and_flavor.py
deleted file mode 100644
index 2ae475843c..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_server_image_and_flavor.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-
-import unittest
-
-import mock
-from novaclient import exceptions as nova_exceptions
-
-import nova_plugin.server as server
-from cloudify.exceptions import NonRecoverableError
-from cloudify.mocks import MockCloudifyContext
-
-
-class TestServerImageAndFlavor(unittest.TestCase):
-
- def test_no_image_and_no_flavor(self):
- node_props = {
- 'image': '',
- 'flavor': ''
- }
- with mock.patch('nova_plugin.server.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
- nova_client = self._get_mocked_nova_client()
-
- serv = {}
- self.assertRaises(NonRecoverableError,
- server._handle_image_or_flavor,
- serv, nova_client, 'image')
- self.assertRaises(NonRecoverableError,
- server._handle_image_or_flavor,
- serv, nova_client, 'flavor')
-
- def test_image_and_flavor_properties_as_names(self):
- node_props = {
- 'image': 'some-image-name',
- 'flavor': 'some-flavor-name'
- }
- with mock.patch('nova_plugin.server.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
- nova_client = self._get_mocked_nova_client()
-
- serv = {}
- server._handle_image_or_flavor(serv, nova_client, 'image')
- server._handle_image_or_flavor(serv, nova_client, 'flavor')
-
- self.assertEquals('some-image-id', serv.get('image'))
- self.assertEquals('some-flavor-id', serv.get('flavor'))
-
- def test_image_and_flavor_properties_as_ids(self):
- node_props = {
- 'image': 'some-image-id',
- 'flavor': 'some-flavor-id'
- }
- with mock.patch('nova_plugin.server.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
- nova_client = self._get_mocked_nova_client()
-
- serv = {}
- server._handle_image_or_flavor(serv, nova_client, 'image')
- server._handle_image_or_flavor(serv, nova_client, 'flavor')
-
- self.assertEquals('some-image-id', serv.get('image'))
- self.assertEquals('some-flavor-id', serv.get('flavor'))
-
- def test_image_id_and_flavor_id(self):
- node_props = {
- 'image': '',
- 'flavor': ''
- }
- with mock.patch('nova_plugin.server.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
- nova_client = self._get_mocked_nova_client()
-
- serv = {}
- serv['image'] = 'some-image-id'
- serv['flavor'] = 'some-flavor-id'
- server._handle_image_or_flavor(serv, nova_client, 'image')
- server._handle_image_or_flavor(serv, nova_client, 'flavor')
-
- self.assertEquals('some-image-id', serv.get('image'))
- self.assertEquals('some-flavor-id', serv.get('flavor'))
-
- def test_image_name_and_flavor_name(self):
- node_props = {
- 'image': '',
- 'flavor': ''
- }
- with mock.patch('nova_plugin.server.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
- nova_client = self._get_mocked_nova_client()
-
- serv = {}
- serv['image_name'] = 'some-image-name'
- serv['flavor_name'] = 'some-flavor-name'
- server._handle_image_or_flavor(serv, nova_client, 'image')
- server._handle_image_or_flavor(serv, nova_client, 'flavor')
-
- self.assertEquals('some-image-id', serv.get('image'))
- self.assertNotIn('image_name', serv)
- self.assertEquals('some-flavor-id', serv.get('flavor'))
- self.assertNotIn('flavor_name', serv)
-
- def test_unknown_image_name_and_flavor_name(self):
- node_props = {
- 'image': '',
- 'flavor': ''
- }
- with mock.patch('nova_plugin.server.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
- nova_client = self._get_mocked_nova_client()
-
- serv = {}
- serv['image_name'] = 'some-unknown-image-name'
- serv['flavor_name'] = 'some-unknown-flavor-name'
-
- self.assertRaises(nova_exceptions.NotFound,
- server._handle_image_or_flavor,
- serv, nova_client, 'image')
- self.assertRaises(nova_exceptions.NotFound,
- server._handle_image_or_flavor,
- serv, nova_client, 'flavor')
-
- def test_image_id_and_flavor_id_override_on_properties(self):
- node_props = {
- 'image': 'properties-image-id',
- 'flavor': 'properties-flavor-id'
- }
- with mock.patch('nova_plugin.server.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
- nova_client = self._get_mocked_nova_client()
-
- serv = {}
- serv['image'] = 'some-image-id'
- serv['flavor'] = 'some-flavor-id'
- server._handle_image_or_flavor(serv, nova_client, 'image')
- server._handle_image_or_flavor(serv, nova_client, 'flavor')
-
- self.assertEquals('some-image-id', serv.get('image'))
- self.assertEquals('some-flavor-id', serv.get('flavor'))
-
- def test_image_name_and_flavor_name_override_on_properties(self):
- node_props = {
- 'image': 'properties-image-id',
- 'flavor': 'properties-flavor-id'
- }
- with mock.patch('nova_plugin.server.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
- nova_client = self._get_mocked_nova_client()
-
- serv = {}
- serv['image_name'] = 'some-image-name'
- serv['flavor_name'] = 'some-flavor-name'
- server._handle_image_or_flavor(serv, nova_client, 'image')
- server._handle_image_or_flavor(serv, nova_client, 'flavor')
-
- self.assertEquals('some-image-id', serv.get('image'))
- self.assertNotIn('image_name', serv)
- self.assertEquals('some-flavor-id', serv.get('flavor'))
- self.assertNotIn('flavor_name', serv)
-
- def test_image_name_and_flavor_name_override_on_image_and_flavor_ids(self):
- node_props = {
- 'image': '',
- 'flavor': ''
- }
- with mock.patch('nova_plugin.server.ctx',
- self._get_mock_ctx_with_node_properties(node_props)):
- nova_client = self._get_mocked_nova_client()
-
- serv = {}
- serv['image'] = 'some-bad-image-id'
- serv['image_name'] = 'some-image-name'
- serv['flavor'] = 'some-bad-flavor-id'
- serv['flavor_name'] = 'some-flavor-name'
- server._handle_image_or_flavor(serv, nova_client, 'image')
- server._handle_image_or_flavor(serv, nova_client, 'flavor')
-
- self.assertEquals('some-image-id', serv.get('image'))
- self.assertNotIn('image_name', serv)
- self.assertEquals('some-flavor-id', serv.get('flavor'))
- self.assertNotIn('flavor_name', serv)
-
- @staticmethod
- def _get_mocked_nova_client():
- nova_client = mock.MagicMock()
-
- def mock_get_if_exists(prop_name, **kwargs):
- is_image = prop_name == 'image'
- searched_name = kwargs.get('name')
- if (is_image and searched_name == 'some-image-name') or \
- (not is_image and searched_name == 'some-flavor-name'):
- result = mock.MagicMock()
- result.id = 'some-image-id' if \
- is_image else 'some-flavor-id'
- return result
- return []
-
- def mock_find_generator(prop_name):
- def mock_find(**kwargs):
- result = mock_get_if_exists(prop_name, **kwargs)
- if not result:
- raise nova_exceptions.NotFound(404)
- return result
- return mock_find
-
- nova_client.cosmo_plural = lambda x: '{0}s'.format(x)
- nova_client.cosmo_get_if_exists = mock_get_if_exists
- nova_client.images.find = mock_find_generator('image')
- nova_client.flavors.find = mock_find_generator('flavor')
- return nova_client
-
- @staticmethod
- def _get_mock_ctx_with_node_properties(properties):
- return MockCloudifyContext(node_id='test_node_id',
- properties=properties)
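The deleted tests above pin down the resolution order of server._handle_image_or_flavor: an explicit image_name/flavor_name key wins and is looked up through the client's find() call, an explicit id already on the server dict is used as-is, and otherwise the node property supplies the id. A rough sketch of that order, reconstructed from the expectations above (the helper below is illustrative only, not the plugin's actual implementation):

    def resolve_image_or_flavor(serv, nova_client, prop_name, node_properties):
        # Illustrative only: mirrors the order the tests above assert.
        name_key = '{0}_name'.format(prop_name)          # e.g. 'image_name'
        if serv.get(name_key):
            # A *_name always wins; find() raises NotFound for unknown names.
            manager = getattr(nova_client, '{0}s'.format(prop_name))
            serv[prop_name] = manager.find(name=serv.pop(name_key)).id
        elif not serv.get(prop_name):
            # Nothing explicit on the server dict: fall back to the node property.
            serv[prop_name] = node_properties[prop_name]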
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_userdata.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_userdata.py
deleted file mode 100644
index d7f056d72c..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_userdata.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#########
-# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import unittest
-
-import mock
-
-from cloudify.mocks import MockCloudifyContext
-
-from nova_plugin import userdata
-
-
-def ctx_mock():
- result = MockCloudifyContext(
- node_id='d',
- properties={})
- result.node.type_hierarchy = ['cloudify.nodes.Compute']
- return result
-
-
-class TestServerUserdataHandling(unittest.TestCase):
-
- @mock.patch('nova_plugin.userdata.ctx', ctx_mock())
- def test_no_userdata(self):
- server_conf = {}
- userdata.handle_userdata(server_conf)
- self.assertEqual(server_conf, {})
-
- def test_agent_installation_userdata(self):
- ctx = ctx_mock()
- ctx.agent.init_script = lambda: 'SCRIPT'
- with mock.patch('nova_plugin.userdata.ctx', ctx):
- server_conf = {}
- userdata.handle_userdata(server_conf)
- self.assertEqual(server_conf, {'userdata': 'SCRIPT'})
-
- @mock.patch('nova_plugin.userdata.ctx', ctx_mock())
- def test_existing_userdata(self):
- server_conf = {'userdata': 'EXISTING'}
- server_conf_copy = server_conf.copy()
- userdata.handle_userdata(server_conf)
- self.assertEqual(server_conf, server_conf_copy)
-
- def test_existing_and_agent_installation_userdata(self):
- ctx = ctx_mock()
- ctx.agent.init_script = lambda: '#! SCRIPT'
- with mock.patch('nova_plugin.userdata.ctx', ctx):
- server_conf = {'userdata': '#! EXISTING'}
- userdata.handle_userdata(server_conf)
- self.assertTrue(server_conf['userdata'].startswith(
- 'Content-Type: multi'))
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_validation.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_validation.py
deleted file mode 100644
index aa1dfdd814..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/tests/test_validation.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#########
-# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import os
-from os import path
-import tempfile
-import shutil
-
-import unittest
-import mock
-
-from cloudify.test_utils import workflow_test
-from nova_plugin.keypair import creation_validation
-from cloudify.exceptions import NonRecoverableError
-
-PRIVATE_KEY_NAME = 'private_key'
-
-
-class TestValidation(unittest.TestCase):
-
- blueprint_path = path.join('resources',
- 'test-keypair-validation-blueprint.yaml')
-
- def setUp(self):
- _, fp = tempfile.mkstemp()
- self.private_key = fp
- _, fp = tempfile.mkstemp()
- self.not_readable_private_key = fp
- os.chmod(self.not_readable_private_key, 0o200)
- self.temp_dir = tempfile.mkdtemp()
- self.not_writable_temp_dir_r = tempfile.mkdtemp()
- os.chmod(self.not_writable_temp_dir_r, 0o400)
- self.not_writable_temp_dir_rx = tempfile.mkdtemp()
- os.chmod(self.not_writable_temp_dir_rx, 0o500)
- self.not_writable_temp_dir_rw = tempfile.mkdtemp()
- os.chmod(self.not_writable_temp_dir_rw, 0o600)
-
- def tearDown(self):
- if self.private_key:
- os.remove(self.private_key)
-
- if self.not_readable_private_key:
- os.remove(self.not_readable_private_key)
-
- shutil.rmtree(self.not_writable_temp_dir_r, ignore_errors=True)
- shutil.rmtree(self.not_writable_temp_dir_rx, ignore_errors=True)
- shutil.rmtree(self.not_writable_temp_dir_rw, ignore_errors=True)
- shutil.rmtree(self.temp_dir, ignore_errors=True)
-
- def new_keypair_create(self, *args, **kwargs):
- creation_validation(*args, **kwargs)
-
- def new_keypair_create_with_exception(self, *args, **kwargs):
- self.assertRaises(NonRecoverableError, creation_validation,
- *args, **kwargs)
-
- def get_keypair_inputs_private_key(self, is_external, **kwargs):
- return {
- 'private_key': self.private_key,
- 'is_keypair_external': is_external
- }
-
- def get_keypair_inputs_not_readable_private_key(self,
- is_external, **kwargs):
- return {
- 'private_key': self.not_readable_private_key,
- 'is_keypair_external': is_external
- }
-
- def get_keypair_inputs_not_writable_dir_r(self, is_external, **kwargs):
- return {
- 'private_key': path.join(self.not_writable_temp_dir_r,
- PRIVATE_KEY_NAME),
- 'is_keypair_external': is_external
- }
-
- def get_keypair_inputs_not_writable_dir_rx(self, is_external, **kwargs):
- return {
- 'private_key': path.join(self.not_writable_temp_dir_rx,
- PRIVATE_KEY_NAME),
- 'is_keypair_external': is_external
- }
-
- def get_keypair_inputs_not_writable_dir_rw(self, is_external, **kwargs):
- return {
- 'private_key': path.join(self.not_writable_temp_dir_rw,
- PRIVATE_KEY_NAME),
- 'is_keypair_external': is_external
- }
-
- def get_keypair_inputs_temp_dir(self, is_external, **kwargs):
- return {
- 'private_key': path.join(self.temp_dir, PRIVATE_KEY_NAME),
- 'is_keypair_external': is_external
- }
-
- @workflow_test(blueprint_path, inputs={
- 'private_key': '',
- 'is_keypair_external': False
- })
- @mock.patch('nova_plugin.keypair.validate_resource')
- def test_keypair_valid_config(self, cfy_local, *args):
-
- with mock.patch('nova_plugin.keypair.create',
- new=self.new_keypair_create):
- cfy_local.execute('install', task_retries=0)
-
- @workflow_test(blueprint_path, inputs='get_keypair_inputs_private_key',
- input_func_kwargs={'is_external': True})
- @mock.patch('nova_plugin.keypair.validate_resource')
- def test_keypair_valid_config_external(self, cfy_local, *args):
-
- with mock.patch('nova_plugin.keypair.create',
- new=self.new_keypair_create):
- cfy_local.execute('install', task_retries=0)
-
- @workflow_test(blueprint_path, inputs='get_keypair_inputs_temp_dir',
- input_func_kwargs={'is_external': True})
- @mock.patch('nova_plugin.keypair.validate_resource')
- def test_keypair_no_private_key(self, cfy_local, *args):
-
- with mock.patch('nova_plugin.keypair.create',
- new=self.new_keypair_create_with_exception):
- cfy_local.execute('install', task_retries=0)
-
- @workflow_test(blueprint_path, inputs='get_keypair_inputs_private_key',
- input_func_kwargs={'is_external': False})
- @mock.patch('nova_plugin.keypair.validate_resource')
- def test_keypair_local_and_exists(self, cfy_local, *args):
-
- with mock.patch('nova_plugin.keypair.create',
- new=self.new_keypair_create_with_exception):
- cfy_local.execute('install', task_retries=0)
-
- @workflow_test(blueprint_path, inputs='get_keypair_inputs_temp_dir',
- input_func_kwargs={'is_external': False})
- @mock.patch('nova_plugin.keypair.validate_resource')
- def test_keypair_local_temp_dir(self, cfy_local, *args):
-
- with mock.patch('nova_plugin.keypair.create',
- new=self.new_keypair_create):
- cfy_local.execute('install', task_retries=0)
-
- @workflow_test(blueprint_path,
- inputs='get_keypair_inputs_not_writable_dir_r',
- input_func_kwargs={'is_external': False})
- @mock.patch('nova_plugin.keypair.validate_resource')
- def test_keypair_local_non_writable_dir_r(self, cfy_local, *args):
-
- with mock.patch('nova_plugin.keypair.create',
- new=self.new_keypair_create_with_exception):
- cfy_local.execute('install', task_retries=0)
-
- @workflow_test(blueprint_path,
- inputs='get_keypair_inputs_not_writable_dir_rx',
- input_func_kwargs={'is_external': False})
- @mock.patch('nova_plugin.keypair.validate_resource')
- def test_keypair_local_non_writable_dir_rx(self, cfy_local, *args):
-
- with mock.patch('nova_plugin.keypair.create',
- new=self.new_keypair_create_with_exception):
- cfy_local.execute('install', task_retries=0)
-
- @workflow_test(blueprint_path,
- inputs='get_keypair_inputs_not_writable_dir_rw',
- input_func_kwargs={'is_external': False})
- @mock.patch('nova_plugin.keypair.validate_resource')
- def test_keypair_local_non_writable_dir_rw(self, cfy_local, *args):
-
- with mock.patch('nova_plugin.keypair.create',
- new=self.new_keypair_create_with_exception):
- cfy_local.execute('install', task_retries=0)
-
- @workflow_test(blueprint_path,
- inputs='get_keypair_inputs_not_readable_private_key',
- input_func_kwargs={'is_external': True})
- @mock.patch('nova_plugin.keypair.validate_resource')
- def test_keypair_not_readable_private_key(self, cfy_local, *args):
-
- with mock.patch('nova_plugin.keypair.create',
- new=self.new_keypair_create_with_exception):
- cfy_local.execute('install', task_retries=0)
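The fixtures above chmod temporary directories to 0o400, 0o500 and 0o600 so the validation has to distinguish directories it can merely read or traverse from ones it can actually create a private-key file in. A check of that kind usually reduces to os.access on the target directory, roughly as below (an illustrative sketch, not the plugin's creation_validation logic):

    import os

    def can_write_private_key(directory):
        # Creating a key file needs write plus execute (search) permission
        # on the directory that will contain it.
        return os.path.isdir(directory) and os.access(directory, os.W_OK | os.X_OK)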
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/userdata.py b/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/userdata.py
deleted file mode 100644
index ba63bb5328..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/nova_plugin/userdata.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#########
-# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import requests
-
-from cloudify import compute
-from cloudify import exceptions
-from cloudify import ctx
-
-
-def handle_userdata(server):
-
- existing_userdata = server.get('userdata')
- install_agent_userdata = ctx.agent.init_script()
-
- if not (existing_userdata or install_agent_userdata):
- return
-
- if isinstance(existing_userdata, dict):
- ud_type = existing_userdata['type']
- if ud_type not in userdata_handlers:
- raise exceptions.NonRecoverableError(
- "Invalid type '{0}' for server userdata)".format(ud_type))
- existing_userdata = userdata_handlers[ud_type](existing_userdata)
-
- if not existing_userdata:
- final_userdata = install_agent_userdata
- elif not install_agent_userdata:
- final_userdata = existing_userdata
- else:
- final_userdata = compute.create_multi_mimetype_userdata(
- [existing_userdata, install_agent_userdata])
- server['userdata'] = final_userdata
-
-
-userdata_handlers = {
- 'http': lambda params: requests.get(params['url']).text
-}
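handle_userdata above merges an explicit 'userdata' value with the agent init script into a single multi-MIME payload; a dict-shaped value is first dereferenced through userdata_handlers, of which only 'http' is registered. A hedged usage sketch (the URL and the surrounding context are placeholders, as in the deleted tests):

    # Inside an operation where nova_plugin.userdata.ctx is available:
    server_conf = {'userdata': {'type': 'http',
                                'url': 'http://example.com/bootstrap.sh'}}
    handle_userdata(server_conf)
    # server_conf['userdata'] now holds the fetched script, combined with
    # ctx.agent.init_script() output when an agent init script is defined.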
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/__init__.py
deleted file mode 100644
index 6ed7daac0b..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/__init__.py
+++ /dev/null
@@ -1,1005 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from functools import wraps, partial
-import json
-import os
-import sys
-
-from IPy import IP
-from keystoneauth1 import loading, session
-import cinderclient.client as cinder_client
-import cinderclient.exceptions as cinder_exceptions
-import keystoneclient.v3.client as keystone_client
-import keystoneclient.exceptions as keystone_exceptions
-import neutronclient.v2_0.client as neutron_client
-import neutronclient.common.exceptions as neutron_exceptions
-import novaclient.client as nova_client
-import novaclient.exceptions as nova_exceptions
-import glanceclient.client as glance_client
-import glanceclient.exc as glance_exceptions
-
-import cloudify
-from cloudify import context, ctx
-from cloudify.exceptions import NonRecoverableError, RecoverableError
-
-INFINITE_RESOURCE_QUOTA = -1
-
-# properties
-USE_EXTERNAL_RESOURCE_PROPERTY = 'use_external_resource'
-CREATE_IF_MISSING_PROPERTY = 'create_if_missing'
-CONFIG_PROPERTY = 'multivim_config'
-
-# runtime properties
-OPENSTACK_AZ_PROPERTY = 'availability_zone'
-OPENSTACK_ID_PROPERTY = 'external_id' # resource's openstack id
-OPENSTACK_TYPE_PROPERTY = 'external_type' # resource's openstack type
-OPENSTACK_NAME_PROPERTY = 'external_name' # resource's openstack name
-CONDITIONALLY_CREATED = 'conditionally_created' # resource was
-# conditionally created
-CONFIG_RUNTIME_PROPERTY = CONFIG_PROPERTY # openstack configuration
-
-# operation inputs
-CONFIG_INPUT = CONFIG_PROPERTY
-
-# runtime properties which all types use
-COMMON_RUNTIME_PROPERTIES_KEYS = [OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY,
- CONDITIONALLY_CREATED]
-
-MISSING_RESOURCE_MESSAGE = "Couldn't find a resource of " \
- "type {0} with the name or id {1}"
-
-
-class ProviderContext(object):
-
- def __init__(self, provider_context):
- self._provider_context = provider_context or {}
- self._resources = self._provider_context.get('resources', {})
-
- @property
- def agents_keypair(self):
- return self._resources.get('agents_keypair')
-
- @property
- def agents_security_group(self):
- return self._resources.get('agents_security_group')
-
- @property
- def ext_network(self):
- return self._resources.get('ext_network')
-
- @property
- def floating_ip(self):
- return self._resources.get('floating_ip')
-
- @property
- def int_network(self):
- return self._resources.get('int_network')
-
- @property
- def management_keypair(self):
- return self._resources.get('management_keypair')
-
- @property
- def management_security_group(self):
- return self._resources.get('management_security_group')
-
- @property
- def management_server(self):
- return self._resources.get('management_server')
-
- @property
- def router(self):
- return self._resources.get('router')
-
- @property
- def subnet(self):
- return self._resources.get('subnet')
-
- def __repr__(self):
- info = json.dumps(self._provider_context)
- return '<' + self.__class__.__name__ + ' ' + info + '>'
-
-
-def provider(ctx):
- return ProviderContext(ctx.provider_context)
-
-
-def assign_payload_as_runtime_properties(ctx, resource_name, payload={}):
- """
- In general Openstack API objects have create, update, and delete
- functions. Each function normally receives a payload that describes
- the desired configuration of the object.
- This makes sure to store that configuration in the runtime
- properties and cleans any potentially sensitive data.
-
- :param ctx: The Cloudify NodeInstanceContext
- :param resource_name: A string describing the resource.
- :param payload: The payload.
- :return:
- """
-
- # Avoid failing if a developer inadvertently passes a
- # non-NodeInstanceContext
- if getattr(ctx, 'instance', None):
- if resource_name not in ctx.instance.runtime_properties.keys():
- ctx.instance.runtime_properties[resource_name] = {}
- for key, value in payload.items():
- if key != 'user_data' and key != 'adminPass':
- ctx.instance.runtime_properties[resource_name][key] = value
-
-
-def get_relationships_by_relationship_type(ctx, type_name):
- """
- Get cloudify relationships by relationship type.
- Follows the inheritance tree.
-
- :param ctx: Cloudify NodeInstanceContext
- :param type_name: desired relationship type derived
- from cloudify.relationships.depends_on.
- :return: list of RelationshipSubjectContext
- """
-
- return [rel for rel in ctx.instance.relationships if
- type_name in rel.type_hierarchy]
-
-
-def get_attribute_of_connected_nodes_by_relationship_type(ctx,
- type_name,
- attribute_name):
- """
- Returns a list of OPENSTACK_ID_PROPERTY from a list of
- Cloudify RelationshipSubjectContext.
-
- :param ctx: Cloudify NodeInstanceContext
- :param type_name: desired relationship type derived
- from cloudify.relationships.depends_on.
- :param attribute_name: usually either
- OPENSTACK_NAME_PROPERTY or OPENSTACK_ID_PROPERTY
- :return:
- """
-
- return [rel.target.instance.runtime_properties[attribute_name]
- for rel in get_relationships_by_relationship_type(ctx, type_name)]
-
-
-def get_relationships_by_openstack_type(ctx, type_name):
- return [rel for rel in ctx.instance.relationships
- if rel.target.instance.runtime_properties.get(
- OPENSTACK_TYPE_PROPERTY) == type_name]
-
-
-def get_connected_nodes_by_openstack_type(ctx, type_name):
- return [rel.target.node
- for rel in get_relationships_by_openstack_type(ctx, type_name)]
-
-
-def get_openstack_ids_of_connected_nodes_by_openstack_type(ctx, type_name):
- return [rel.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
- for rel in get_relationships_by_openstack_type(ctx, type_name)
- ]
-
-
-def get_openstack_names_of_connected_nodes_by_openstack_type(ctx, type_name):
- return [rel.target.instance.runtime_properties[OPENSTACK_NAME_PROPERTY]
- for rel in get_relationships_by_openstack_type(ctx, type_name)
- ]
-
-
-def get_single_connected_node_by_openstack_type(
- ctx, type_name, if_exists=False):
- nodes = get_connected_nodes_by_openstack_type(ctx, type_name)
- check = len(nodes) > 1 if if_exists else len(nodes) != 1
- if check:
- raise NonRecoverableError(
- 'Expected {0} one {1} node, got {2}'.format(
- 'at most' if if_exists else 'exactly', type_name, len(nodes)))
- return nodes[0] if nodes else None
-
-
-def get_openstack_id_of_single_connected_node_by_openstack_type(
- ctx, type_name, if_exists=False):
- ids = get_openstack_ids_of_connected_nodes_by_openstack_type(ctx,
- type_name)
- check = len(ids) > 1 if if_exists else len(ids) != 1
- if check:
- raise NonRecoverableError(
- 'Expected {0} one {1} capability, got {2}'.format(
- 'at most' if if_exists else 'exactly', type_name, len(ids)))
- return ids[0] if ids else None
-
-
-def get_resource_id(ctx, type_name):
- if ctx.node.properties['resource_id']:
- return ctx.node.properties['resource_id']
- return "{0}_{1}_{2}".format(type_name, ctx.deployment.id, ctx.instance.id)
-
-
-def transform_resource_name(ctx, res):
-
- if isinstance(res, basestring):
- res = {'name': res}
-
- if not isinstance(res, dict):
- raise ValueError("transform_resource_name() expects either string or "
- "dict as the first parameter")
-
- pfx = ctx.bootstrap_context.resources_prefix
-
- if not pfx:
- return res['name']
-
- name = res['name']
- res['name'] = pfx + name
-
- if name.startswith(pfx):
- ctx.logger.warn("Prefixing resource '{0}' with '{1}' but it "
- "already has this prefix".format(name, pfx))
- else:
- ctx.logger.info("Transformed resource name '{0}' to '{1}'".format(
- name, res['name']))
-
- return res['name']
-
-
-def _get_resource_by_name_or_id_from_ctx(ctx, name_field_name, openstack_type,
- sugared_client):
- resource_id = ctx.node.properties['resource_id']
- if not resource_id:
- raise NonRecoverableError(
- "Can't set '{0}' to True without supplying a value for "
- "'resource_id'".format(USE_EXTERNAL_RESOURCE_PROPERTY))
-
- return get_resource_by_name_or_id(resource_id, openstack_type,
- sugared_client, True, name_field_name)
-
-
-def get_resource_by_name_or_id(
- resource_id, openstack_type, sugared_client,
- raise_if_not_found=True, name_field_name='name'):
-
- # search for resource by name (or name-equivalent field)
- search_param = {name_field_name: resource_id}
- resource = sugared_client.cosmo_get_if_exists(openstack_type,
- **search_param)
- if not resource:
- # fallback - search for resource by id
- resource = sugared_client.cosmo_get_if_exists(
- openstack_type, id=resource_id)
-
- if not resource and raise_if_not_found:
- raise NonRecoverableError(
- MISSING_RESOURCE_MESSAGE.format(openstack_type, resource_id))
-
- return resource
-
-
-def use_external_resource(ctx, sugared_client, openstack_type,
- name_field_name='name'):
- if not is_external_resource(ctx):
- return None
- try:
- resource = _get_resource_by_name_or_id_from_ctx(
- ctx, name_field_name, openstack_type, sugared_client)
- except NonRecoverableError:
- if is_create_if_missing(ctx):
- ctx.instance.runtime_properties[CONDITIONALLY_CREATED] = True
- return None
- else:
- raise
-
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = \
- sugared_client.get_id_from_resource(resource)
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = openstack_type
-
- from openstack_plugin_common.floatingip import FLOATINGIP_OPENSTACK_TYPE
- # store openstack name runtime property, unless it's a floating IP type,
- # in which case the ip will be stored in the runtime properties instead.
- if openstack_type != FLOATINGIP_OPENSTACK_TYPE:
- ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \
- sugared_client.get_name_from_resource(resource)
-
- ctx.logger.info('Using external resource {0}: {1}'.format(
- openstack_type, ctx.node.properties['resource_id']))
- return resource
-
-
-def validate_resource(ctx, sugared_client, openstack_type,
- name_field_name='name'):
- ctx.logger.debug('validating resource {0} (node {1})'.format(
- openstack_type, ctx.node.id))
-
- openstack_type_plural = sugared_client.cosmo_plural(openstack_type)
- resource = None
-
- if is_external_resource(ctx):
-
- try:
- # validate the resource truly exists
- resource = _get_resource_by_name_or_id_from_ctx(
- ctx, name_field_name, openstack_type, sugared_client)
- ctx.logger.debug('OK: {0} {1} found in pool'.format(
- openstack_type, ctx.node.properties['resource_id']))
- except NonRecoverableError as e:
- if not is_create_if_missing(ctx):
- ctx.logger.error('VALIDATION ERROR: ' + str(e))
- resource_list = list(sugared_client.cosmo_list(openstack_type))
- if resource_list:
- ctx.logger.info('list of existing {0}: '.format(
- openstack_type_plural))
- for resource in resource_list:
- ctx.logger.info(' {0:>10} - {1}'.format(
- sugared_client.get_id_from_resource(resource),
- sugared_client.get_name_from_resource(resource)))
- else:
- ctx.logger.info('there are no existing {0}'.format(
- openstack_type_plural))
- raise
- if not resource:
- if isinstance(sugared_client, NovaClientWithSugar):
- # not checking quota for Nova resources due to a bug in Nova client
- return
-
- # validate available quota for provisioning the resource
- resource_list = list(sugared_client.cosmo_list(openstack_type))
- resource_amount = len(resource_list)
-
- resource_quota = sugared_client.get_quota(openstack_type)
-
- if resource_amount < resource_quota \
- or resource_quota == INFINITE_RESOURCE_QUOTA:
- ctx.logger.debug(
- 'OK: {0} (node {1}) can be created. provisioned {2}: {3}, '
- 'quota: {4}'
- .format(openstack_type, ctx.node.id, openstack_type_plural,
- resource_amount, resource_quota))
- else:
- err = ('{0} (node {1}) cannot be created due to quota limitations.'
- ' provisioned {2}: {3}, quota: {4}'
- .format(openstack_type, ctx.node.id, openstack_type_plural,
- resource_amount, resource_quota))
- ctx.logger.error('VALIDATION ERROR:' + err)
- raise NonRecoverableError(err)
-
-
-def delete_resource_and_runtime_properties(ctx, sugared_client,
- runtime_properties_keys):
- node_openstack_type = ctx.instance.runtime_properties[
- OPENSTACK_TYPE_PROPERTY]
- if not is_external_resource(ctx):
- ctx.logger.info('deleting {0}'.format(node_openstack_type))
- sugared_client.cosmo_delete_resource(
- node_openstack_type,
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY])
- else:
- ctx.logger.info('not deleting {0} since an external {0} is '
- 'being used'.format(node_openstack_type))
-
- delete_runtime_properties(ctx, runtime_properties_keys)
-
-
-def is_external_resource(ctx):
- return is_external_resource_by_properties(ctx.node.properties)
-
-
-def is_external_resource_not_conditionally_created(ctx):
- return is_external_resource_by_properties(ctx.node.properties) and \
- not ctx.instance.runtime_properties.get(CONDITIONALLY_CREATED)
-
-
-def is_external_relationship_not_conditionally_created(ctx):
- return is_external_resource_by_properties(ctx.source.node.properties) and \
- is_external_resource_by_properties(ctx.target.node.properties) and \
- not ctx.source.instance.runtime_properties.get(
- CONDITIONALLY_CREATED) and not \
- ctx.target.instance.runtime_properties.get(CONDITIONALLY_CREATED)
-
-
-def is_create_if_missing(ctx):
- return is_create_if_missing_by_properties(ctx.node.properties)
-
-
-def is_external_relationship(ctx):
- return is_external_resource_by_properties(ctx.source.node.properties) and \
- is_external_resource_by_properties(ctx.target.node.properties)
-
-
-def is_external_resource_by_properties(properties):
- return USE_EXTERNAL_RESOURCE_PROPERTY in properties and \
- properties[USE_EXTERNAL_RESOURCE_PROPERTY]
-
-
-def is_create_if_missing_by_properties(properties):
- return CREATE_IF_MISSING_PROPERTY in properties and \
- properties[CREATE_IF_MISSING_PROPERTY]
-
-
-def delete_runtime_properties(ctx, runtime_properties_keys):
- for runtime_prop_key in runtime_properties_keys:
- if runtime_prop_key in ctx.instance.runtime_properties:
- del ctx.instance.runtime_properties[runtime_prop_key]
-
-
-def validate_ip_or_range_syntax(ctx, address, is_range=True):
- range_suffix = ' range' if is_range else ''
- ctx.logger.debug('checking whether {0} is a valid address{1}...'
- .format(address, range_suffix))
- try:
- IP(address)
- ctx.logger.debug('OK:'
- '{0} is a valid address{1}.'.format(address,
- range_suffix))
- except ValueError as e:
- err = ('{0} is not a valid address{1}; {2}'.format(
- address, range_suffix, e.message))
- ctx.logger.error('VALIDATION ERROR:' + err)
- raise NonRecoverableError(err)
-
-
-class Config(object):
-
- OPENSTACK_CONFIG_PATH_ENV_VAR = 'OPENSTACK_CONFIG_PATH'
- OPENSTACK_CONFIG_PATH_DEFAULT_PATH = '~/openstack_config.json'
- OPENSTACK_ENV_VAR_PREFIX = 'OS_'
- OPENSTACK_SUPPORTED_ENV_VARS = {
- 'OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_NAME',
- 'OS_REGION_NAME', 'OS_PROJECT_ID', 'OS_PROJECT_NAME',
- 'OS_USER_DOMAIN_NAME', 'OS_PROJECT_DOMAIN_NAME'
- }
-
- @classmethod
- def get(cls):
- static_config = cls._build_config_from_env_variables()
- env_name = cls.OPENSTACK_CONFIG_PATH_ENV_VAR
- default_location_tpl = cls.OPENSTACK_CONFIG_PATH_DEFAULT_PATH
- default_location = os.path.expanduser(default_location_tpl)
- config_path = os.getenv(env_name, default_location)
- try:
- with open(config_path) as f:
- cls.update_config(static_config, json.loads(f.read()))
- except IOError:
- pass
- return static_config
-
- @classmethod
- def _build_config_from_env_variables(cls):
- return {v.lstrip(cls.OPENSTACK_ENV_VAR_PREFIX).lower(): os.environ[v]
- for v in cls.OPENSTACK_SUPPORTED_ENV_VARS if v in os.environ}
-
- @staticmethod
- def update_config(overridden_cfg, overriding_cfg):
- """ this method is like dict.update() only that it doesn't override
- with (or set new) empty values (e.g. empty string) """
- for k, v in overriding_cfg.iteritems():
- if v:
- overridden_cfg[k] = v
-
-
-class OpenStackClient(object):
-
- COMMON = {'username', 'password', 'auth_url'}
- AUTH_SETS = [
- COMMON | {'tenant_name'},
- COMMON | {'project_id', 'user_domain_name'},
- COMMON | {'project_id', 'project_name', 'user_domain_name'},
- COMMON | {'project_name', 'user_domain_name', 'project_domain_name'},
- ]
- OPTIONAL_AUTH_PARAMS = {'insecure'}
-
- def __init__(self, client_name, client_class, config=None, *args, **kw):
- cfg = Config.get()
-
- if config:
- Config.update_config(cfg, config)
-
- v3 = '/v3' in cfg['auth_url']
- # Newer libraries expect the region key to be `region_name`, not
- # `region`.
- region = cfg.pop('region', None)
- if v3 and region:
- cfg['region_name'] = region
-
- cfg = self._merge_custom_configuration(cfg, client_name)
-
- auth_params, client_params = OpenStackClient._split_config(cfg)
- OpenStackClient._validate_auth_params(auth_params)
-
- if v3:
- # keystone v3 complains if these aren't set.
- for key in 'user_domain_name', 'project_domain_name':
- auth_params.setdefault(key, 'default')
-
- client_params['session'] = self._authenticate(auth_params)
- self._client = client_class(**client_params)
-
- @classmethod
- def _validate_auth_params(cls, params):
- if set(params.keys()) - cls.OPTIONAL_AUTH_PARAMS in cls.AUTH_SETS:
- return
-
- def set2str(s):
- return '({})'.format(', '.join(sorted(s)))
-
- received_params = set2str(params)
- valid_auth_sets = map(set2str, cls.AUTH_SETS)
- raise NonRecoverableError(
- "{} is not valid set of auth params. Expected to find parameters "
- "either as environment variables, in a JSON file (at either a "
- "path which is set under the environment variable {} or at the "
- "default location {}), or as nested properties under an "
- "'{}' property. Valid auth param sets are: {}."
- .format(received_params,
- Config.OPENSTACK_CONFIG_PATH_ENV_VAR,
- Config.OPENSTACK_CONFIG_PATH_DEFAULT_PATH,
- CONFIG_PROPERTY,
- ', '.join(valid_auth_sets)))
-
- @staticmethod
- def _merge_custom_configuration(cfg, client_name):
- config = cfg.copy()
-
- mapping = {
- 'nova_url': 'nova_client',
- 'neutron_url': 'neutron_client'
- }
- for key in 'nova_url', 'neutron_url':
- val = config.pop(key, None)
- if val is not None:
- ctx.logger.warn(
- "'{}' property is deprecated. Use `custom_configuration"
- ".{}.endpoint_override` instead.".format(
- key, mapping[key]))
- if mapping.get(key, None) == client_name:
- config['endpoint_override'] = val
-
- if 'custom_configuration' in cfg:
- del config['custom_configuration']
- config.update(cfg['custom_configuration'].get(client_name, {}))
- return config
-
- @classmethod
- def _split_config(cls, cfg):
- all = reduce(lambda x, y: x | y, cls.AUTH_SETS)
- all |= cls.OPTIONAL_AUTH_PARAMS
-
- auth, misc = {}, {}
- for param, value in cfg.items():
- if param in all:
- auth[param] = value
- else:
- misc[param] = value
- return auth, misc
-
- @staticmethod
- def _authenticate(cfg):
- verify = True
- if 'insecure' in cfg:
- cfg = cfg.copy()
- # NOTE: Next line will evaluate to False only when insecure is set
- # to True. Any other value (string etc.) will force verify to True.
- # This is done on purpose, since we do not wish to use insecure
- # connection by mistake.
- verify = not (cfg['insecure'] is True)
- del cfg['insecure']
- loader = loading.get_plugin_loader("password")
- auth = loader.load_from_options(**cfg)
- sess = session.Session(auth=auth, verify=verify)
- return sess
-
- # Proxy any unknown call to base client
- def __getattr__(self, attr):
- return getattr(self._client, attr)
-
- # Sugar, common to all clients
- def cosmo_plural(self, obj_type_single):
- return obj_type_single + 's'
-
- def cosmo_get_named(self, obj_type_single, name, **kw):
- return self.cosmo_get(obj_type_single, name=name, **kw)
-
- def cosmo_get(self, obj_type_single, **kw):
- return self._cosmo_get(obj_type_single, False, **kw)
-
- def cosmo_get_if_exists(self, obj_type_single, **kw):
- return self._cosmo_get(obj_type_single, True, **kw)
-
- def _cosmo_get(self, obj_type_single, if_exists, **kw):
- ls = list(self.cosmo_list(obj_type_single, **kw))
- check = len(ls) > 1 if if_exists else len(ls) != 1
- if check:
- raise NonRecoverableError(
- "Expected {0} one object of type {1} "
- "with match {2} but there are {3}".format(
- 'at most' if if_exists else 'exactly',
- obj_type_single, kw, len(ls)))
- return ls[0] if ls else None
-
-
-class GlanceClient(OpenStackClient):
-
- # Can't glance_url be figured out from keystone?
- REQUIRED_CONFIG_PARAMS = \
- ['username', 'password', 'tenant_name', 'auth_url']
-
- def connect(self, cfg):
- loader = loading.get_plugin_loader('password')
- auth = loader.load_from_options(
- auth_url=cfg['auth_url'],
- username=cfg['username'],
- password=cfg['password'],
- tenant_name=cfg['tenant_name'])
- sess = session.Session(auth=auth)
-
- client_kwargs = dict(
- session=sess,
- )
- if cfg.get('glance_url'):
- client_kwargs['endpoint'] = cfg['glance_url']
-
- return GlanceClientWithSugar(**client_kwargs)
-
-
-# Decorators
-def _find_instanceof_in_kw(cls, kw):
- ret = [v for v in kw.values() if isinstance(v, cls)]
- if not ret:
- return None
- if len(ret) > 1:
- raise NonRecoverableError(
- "Expected to find exactly one instance of {0} in "
- "kwargs but found {1}".format(cls, len(ret)))
- return ret[0]
-
-
-def _find_context_in_kw(kw):
- return _find_instanceof_in_kw(cloudify.context.CloudifyContext, kw)
-
-
-def with_neutron_client(f):
- @wraps(f)
- def wrapper(*args, **kw):
- _put_client_in_kw('neutron_client', NeutronClientWithSugar, kw)
-
- try:
- return f(*args, **kw)
- except neutron_exceptions.NeutronClientException, e:
- if e.status_code in _non_recoverable_error_codes:
- _re_raise(e, recoverable=False, status_code=e.status_code)
- else:
- raise
- return wrapper
-
-
-def with_nova_client(f):
- @wraps(f)
- def wrapper(*args, **kw):
- _put_client_in_kw('nova_client', NovaClientWithSugar, kw)
-
- try:
- return f(*args, **kw)
- except nova_exceptions.OverLimit, e:
- _re_raise(e, recoverable=True, retry_after=e.retry_after)
- except nova_exceptions.ClientException, e:
- if e.code in _non_recoverable_error_codes:
- _re_raise(e, recoverable=False, status_code=e.code)
- else:
- raise
- return wrapper
-
-
-def with_cinder_client(f):
- @wraps(f)
- def wrapper(*args, **kw):
- _put_client_in_kw('cinder_client', CinderClientWithSugar, kw)
-
- try:
- return f(*args, **kw)
- except cinder_exceptions.ClientException, e:
- if e.code in _non_recoverable_error_codes:
- _re_raise(e, recoverable=False, status_code=e.code)
- else:
- raise
- return wrapper
-
-
-def with_glance_client(f):
- @wraps(f)
- def wrapper(*args, **kw):
- _put_client_in_kw('glance_client', GlanceClientWithSugar, kw)
-
- try:
- return f(*args, **kw)
- except glance_exceptions.ClientException, e:
- if e.code in _non_recoverable_error_codes:
- _re_raise(e, recoverable=False, status_code=e.code)
- else:
- raise
- return wrapper
-
-
-def with_keystone_client(f):
- @wraps(f)
- def wrapper(*args, **kw):
- _put_client_in_kw('keystone_client', KeystoneClientWithSugar, kw)
-
- try:
- return f(*args, **kw)
- except keystone_exceptions.HTTPError, e:
- if e.http_status in _non_recoverable_error_codes:
- _re_raise(e, recoverable=False, status_code=e.http_status)
- else:
- raise
- except keystone_exceptions.ClientException, e:
- _re_raise(e, recoverable=False)
- return wrapper
-
-
-def _put_client_in_kw(client_name, client_class, kw):
- if client_name in kw:
- return
-
- ctx = _find_context_in_kw(kw)
- if ctx.type == context.NODE_INSTANCE:
- config = ctx.node.properties.get(CONFIG_PROPERTY)
- rt_config = ctx.instance.runtime_properties.get(
- CONFIG_RUNTIME_PROPERTY)
- elif ctx.type == context.RELATIONSHIP_INSTANCE:
- config = ctx.source.node.properties.get(CONFIG_PROPERTY)
- rt_config = ctx.source.instance.runtime_properties.get(
- CONFIG_RUNTIME_PROPERTY)
- if not config:
- config = ctx.target.node.properties.get(CONFIG_PROPERTY)
- rt_config = ctx.target.instance.runtime_properties.get(
- CONFIG_RUNTIME_PROPERTY)
-
- else:
- config = None
- rt_config = None
-
- # Overlay with configuration from runtime property, if any.
- if rt_config:
- if config:
- config = config.copy()
- config.update(rt_config)
- else:
- config = rt_config
-
- if CONFIG_INPUT in kw:
- if config:
- config = config.copy()
- config.update(kw[CONFIG_INPUT])
- else:
- config = kw[CONFIG_INPUT]
- kw[client_name] = client_class(config=config)
-
-
-_non_recoverable_error_codes = [400, 401, 403, 404, 409]
-
-
-def _re_raise(e, recoverable, retry_after=None, status_code=None):
- exc_type, exc, traceback = sys.exc_info()
- message = e.message
- if status_code is not None:
- message = '{0} [status_code={1}]'.format(message, status_code)
- if recoverable:
- if retry_after == 0:
- retry_after = None
- raise RecoverableError(
- message=message,
- retry_after=retry_after), None, traceback
- else:
- raise NonRecoverableError(message), None, traceback
-
-
-# Sugar for clients
-
-class NovaClientWithSugar(OpenStackClient):
-
- def __init__(self, *args, **kw):
- config = kw['config']
- if config.get('nova_url'):
- config['endpoint_override'] = config.pop('nova_url')
-
- super(NovaClientWithSugar, self).__init__(
- 'nova_client', partial(nova_client.Client, '2'), *args, **kw)
-
- def cosmo_list(self, obj_type_single, **kw):
- """ Sugar for xxx.findall() - not using xxx.list() because findall
- can receive filtering parameters, and it's common for all types"""
- obj_type_plural = self._get_nova_field_name_for_type(obj_type_single)
- for obj in getattr(self, obj_type_plural).findall(**kw):
- yield obj
-
- def cosmo_delete_resource(self, obj_type_single, obj_id):
- obj_type_plural = self._get_nova_field_name_for_type(obj_type_single)
- getattr(self, obj_type_plural).delete(obj_id)
-
- def get_id_from_resource(self, resource):
- return resource.id
-
- def get_name_from_resource(self, resource):
- return resource.name
-
- def get_quota(self, obj_type_single):
- raise RuntimeError(
- 'Retrieving quotas from Nova service is currently unsupported '
- 'due to a bug in Nova python client')
-
- # we're already authenticated, but the following call will make
- # 'service_catalog' available under 'client', through which we can
- # extract the tenant_id (Note that self.client.tenant_id might be
- # None if project_id (AKA tenant_name) was used instead; However the
- # actual tenant_id must be used to retrieve the quotas)
- self.client.authenticate()
- tenant_id = self.client.service_catalog.get_tenant_id()
- quotas = self.quotas.get(tenant_id)
- return getattr(quotas, self.cosmo_plural(obj_type_single))
-
- def _get_nova_field_name_for_type(self, obj_type_single):
- from openstack_plugin_common.floatingip import \
- FLOATINGIP_OPENSTACK_TYPE
- if obj_type_single == FLOATINGIP_OPENSTACK_TYPE:
- # since we use the same 'openstack type' property value for both
- # neutron and nova floating-ips, this adjustment must be made
- # for nova client, as fields names differ between the two clients
- obj_type_single = 'floating_ip'
- return self.cosmo_plural(obj_type_single)
-
-
-class NeutronClientWithSugar(OpenStackClient):
-
- def __init__(self, *args, **kw):
- super(NeutronClientWithSugar, self).__init__(
- 'neutron_client', neutron_client.Client, *args, **kw)
-
- def cosmo_list(self, obj_type_single, **kw):
- """ Sugar for list_XXXs()['XXXs'] """
- obj_type_plural = self.cosmo_plural(obj_type_single)
- for obj in getattr(self, 'list_' + obj_type_plural)(**kw)[
- obj_type_plural]:
- yield obj
-
- def cosmo_delete_resource(self, obj_type_single, obj_id):
- getattr(self, 'delete_' + obj_type_single)(obj_id)
-
- def get_id_from_resource(self, resource):
- return resource['id']
-
- def get_name_from_resource(self, resource):
- return resource['name']
-
- def get_quota(self, obj_type_single):
- tenant_id = self.get_quotas_tenant()['tenant']['tenant_id']
- quotas = self.show_quota(tenant_id)['quota']
- return quotas[obj_type_single]
-
- def cosmo_list_prefixed(self, obj_type_single, name_prefix):
- for obj in self.cosmo_list(obj_type_single):
- if obj['name'].startswith(name_prefix):
- yield obj
-
- def cosmo_delete_prefixed(self, name_prefix):
- # Cleanup all neutron.list_XXX() objects with names starting
- # with self.name_prefix
- for obj_type_single in 'port', 'router', 'network', 'subnet',\
- 'security_group':
- for obj in self.cosmo_list_prefixed(obj_type_single, name_prefix):
- if obj_type_single == 'router':
- ports = self.cosmo_list('port', device_id=obj['id'])
- for port in ports:
- try:
- self.remove_interface_router(
- port['device_id'],
- {'port_id': port['id']})
- except neutron_exceptions.NeutronClientException:
- pass
- getattr(self, 'delete_' + obj_type_single)(obj['id'])
-
- def cosmo_find_external_net(self):
- """ For tests of floating IP """
- nets = self.list_networks()['networks']
- ls = [net for net in nets if net.get('router:external')]
- if len(ls) != 1:
- raise NonRecoverableError(
- "Expected exactly one external network but found {0}".format(
- len(ls)))
- return ls[0]
-
-
-class CinderClientWithSugar(OpenStackClient):
-
- def __init__(self, *args, **kw):
- super(CinderClientWithSugar, self).__init__(
- 'cinder_client', partial(cinder_client.Client, '2'), *args, **kw)
-
- def cosmo_list(self, obj_type_single, **kw):
- obj_type_plural = self.cosmo_plural(obj_type_single)
- for obj in getattr(self, obj_type_plural).findall(**kw):
- yield obj
-
- def cosmo_delete_resource(self, obj_type_single, obj_id):
- obj_type_plural = self.cosmo_plural(obj_type_single)
- getattr(self, obj_type_plural).delete(obj_id)
-
- def get_id_from_resource(self, resource):
- return resource.id
-
- def get_name_from_resource(self, resource):
- return resource.name
-
- def get_quota(self, obj_type_single):
- # we're already authenticated, but the following call will make
- # 'service_catalog' available under 'client', through which we can
- # extract the tenant_id (Note that self.client.tenant_id might be
- # None if project_id (AKA tenant_name) was used instead; However the
- # actual tenant_id must be used to retrieve the quotas)
- self.client.authenticate()
- project_id = self.client.session.get_project_id()
- quotas = self.quotas.get(project_id)
- return getattr(quotas, self.cosmo_plural(obj_type_single))
-
-
-class KeystoneClientWithSugar(OpenStackClient):
- # keystone does not have resource quota
- KEYSTONE_INFINITE_RESOURCE_QUOTA = 10**9
-
- def __init__(self, *args, **kw):
- super(KeystoneClientWithSugar, self).__init__(
- 'keystone_client', keystone_client.Client, *args, **kw)
-
- def cosmo_list(self, obj_type_single, **kw):
- obj_type_plural = self.cosmo_plural(obj_type_single)
- for obj in getattr(self, obj_type_plural).list(**kw):
- yield obj
-
- def cosmo_delete_resource(self, obj_type_single, obj_id):
- obj_type_plural = self.cosmo_plural(obj_type_single)
- getattr(self, obj_type_plural).delete(obj_id)
-
- def get_id_from_resource(self, resource):
- return resource.id
-
- def get_name_from_resource(self, resource):
- return resource.name
-
- def get_quota(self, obj_type_single):
- return self.KEYSTONE_INFINITE_RESOURCE_QUOTA
-
-
-class GlanceClientWithSugar(OpenStackClient):
- GLANCE_INIFINITE_RESOURCE_QUOTA = 10**9
-
- def __init__(self, *args, **kw):
- super(GlanceClientWithSugar, self).__init__(
- 'glance_client', partial(glance_client.Client, '2'), *args, **kw)
-
- def cosmo_list(self, obj_type_single, **kw):
- obj_type_plural = self.cosmo_plural(obj_type_single)
- return getattr(self, obj_type_plural).list(filters=kw)
-
- def cosmo_delete_resource(self, obj_type_single, obj_id):
- obj_type_plural = self.cosmo_plural(obj_type_single)
- getattr(self, obj_type_plural).delete(obj_id)
-
- def get_id_from_resource(self, resource):
- return resource.id
-
- def get_name_from_resource(self, resource):
- return resource.name
-
- def get_quota(self, obj_type_single):
- return self.GLANCE_INIFINITE_RESOURCE_QUOTA
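The with_*_client decorators above resolve the OpenStack configuration from node properties, runtime properties and an optional operation input, inject a sugared client into the wrapped function's kwargs, and convert 4xx client errors into NonRecoverableError. A sketch of how an operation module typically consumes one of them (the @operation wrapper and the task body are illustrative assumptions, not code from this module):

    from cloudify import ctx
    from cloudify.decorators import operation
    from openstack_plugin_common import (with_nova_client,
                                         OPENSTACK_ID_PROPERTY)

    @operation
    @with_nova_client
    def start(nova_client, **kwargs):
        # nova_client is injected by the decorator; 4xx errors surface as
        # NonRecoverableError instead of raw novaclient exceptions.
        server_id = ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]
        nova_client.servers.start(server_id)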
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/floatingip.py b/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/floatingip.py
deleted file mode 100644
index fe5896520b..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/floatingip.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from cloudify import ctx
-from openstack_plugin_common import (
- delete_resource_and_runtime_properties,
- use_external_resource,
- validate_resource,
- COMMON_RUNTIME_PROPERTIES_KEYS,
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY)
-
-
-FLOATINGIP_OPENSTACK_TYPE = 'floatingip'
-
-# Runtime properties
-IP_ADDRESS_PROPERTY = 'floating_ip_address' # the actual ip address
-RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + \
- [IP_ADDRESS_PROPERTY]
-
-
-def use_external_floatingip(client, ip_field_name, ext_fip_ip_extractor):
- external_fip = use_external_resource(
- ctx, client, FLOATINGIP_OPENSTACK_TYPE, ip_field_name)
- if external_fip:
- ctx.instance.runtime_properties[IP_ADDRESS_PROPERTY] = \
- ext_fip_ip_extractor(external_fip)
- return True
-
- return False
-
-
-def set_floatingip_runtime_properties(fip_id, ip_address):
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = fip_id
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
- FLOATINGIP_OPENSTACK_TYPE
- ctx.instance.runtime_properties[IP_ADDRESS_PROPERTY] = ip_address
-
-
-def delete_floatingip(client, **kwargs):
- delete_resource_and_runtime_properties(ctx, client,
- RUNTIME_PROPERTIES_KEYS)
-
-
-def floatingip_creation_validation(client, ip_field_name, **kwargs):
- validate_resource(ctx, client, FLOATINGIP_OPENSTACK_TYPE,
- ip_field_name)
-
-
-def get_server_floating_ip(neutron_client, server_id):
-
- floating_ips = neutron_client.list_floatingips()
-
- floating_ips = floating_ips.get('floatingips')
- if not floating_ips:
- return None
-
- for floating_ip in floating_ips:
- port_id = floating_ip.get('port_id')
- if not port_id:
- # this floating ip is not attached to any port
- continue
-
- port = neutron_client.show_port(port_id)['port']
- device_id = port.get('device_id')
- if not device_id:
- # this port is not attached to any server
- continue
-
- if server_id == device_id:
- return floating_ip
- return None
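get_server_floating_ip above resolves the association indirectly: it lists every floating IP, follows its port_id to the port, and matches the port's device_id against the server. A minimal illustrative call (the server id is a placeholder):

    fip = get_server_floating_ip(neutron_client, 'some-server-id')
    if fip:
        ctx.logger.info('Server floating IP: {0}'.format(
            fip['floating_ip_address']))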
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/security_group.py b/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/security_group.py
deleted file mode 100644
index 0fa21aa149..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/security_group.py
+++ /dev/null
@@ -1,148 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import copy
-import re
-
-from cloudify import ctx
-from cloudify.exceptions import NonRecoverableError
-
-from openstack_plugin_common import (
- get_resource_id,
- use_external_resource,
- delete_resource_and_runtime_properties,
- validate_resource,
- validate_ip_or_range_syntax,
- OPENSTACK_ID_PROPERTY,
- OPENSTACK_TYPE_PROPERTY,
- OPENSTACK_NAME_PROPERTY,
- COMMON_RUNTIME_PROPERTIES_KEYS
-)
-
-SECURITY_GROUP_OPENSTACK_TYPE = 'security_group'
-
-# Runtime properties
-RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
-
-NODE_NAME_RE = re.compile('^(.*)_.*$') # Anything before last underscore
-
-
-def build_sg_data(args=None):
- security_group = {
- 'description': None,
- 'name': get_resource_id(ctx, SECURITY_GROUP_OPENSTACK_TYPE),
- }
-
- args = args or {}
- security_group.update(ctx.node.properties['security_group'], **args)
-
- return security_group
-
-
-def process_rules(client, sgr_default_values, cidr_field_name,
- remote_group_field_name, min_port_field_name,
- max_port_field_name):
- rules_to_apply = ctx.node.properties['rules']
- security_group_rules = []
- for rule in rules_to_apply:
- security_group_rules.append(
- _process_rule(rule, client, sgr_default_values, cidr_field_name,
- remote_group_field_name, min_port_field_name,
- max_port_field_name))
-
- return security_group_rules
-
-
-def use_external_sg(client):
- return use_external_resource(ctx, client,
- SECURITY_GROUP_OPENSTACK_TYPE)
-
-
-def set_sg_runtime_properties(sg, client):
- ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] =\
- client.get_id_from_resource(sg)
- ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] =\
- SECURITY_GROUP_OPENSTACK_TYPE
- ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \
- client.get_name_from_resource(sg)
-
-
-def delete_sg(client, **kwargs):
- delete_resource_and_runtime_properties(ctx, client,
- RUNTIME_PROPERTIES_KEYS)
-
-
-def sg_creation_validation(client, cidr_field_name, **kwargs):
- validate_resource(ctx, client, SECURITY_GROUP_OPENSTACK_TYPE)
-
- ctx.logger.debug('validating CIDR for rules with a {0} field'.format(
- cidr_field_name))
- for rule in ctx.node.properties['rules']:
- if cidr_field_name in rule:
- validate_ip_or_range_syntax(ctx, rule[cidr_field_name])
-
-
-def _process_rule(rule, client, sgr_default_values, cidr_field_name,
- remote_group_field_name, min_port_field_name,
- max_port_field_name):
- ctx.logger.debug(
- "Security group rule before transformations: {0}".format(rule))
-
- sgr = copy.deepcopy(sgr_default_values)
- if 'port' in rule:
- rule[min_port_field_name] = rule['port']
- rule[max_port_field_name] = rule['port']
- del rule['port']
- sgr.update(rule)
-
- if (remote_group_field_name in sgr) and sgr[remote_group_field_name]:
- sgr[cidr_field_name] = None
- elif ('remote_group_node' in sgr) and sgr['remote_group_node']:
- _, remote_group_node = _capabilities_of_node_named(
- sgr['remote_group_node'])
- sgr[remote_group_field_name] = remote_group_node[OPENSTACK_ID_PROPERTY]
- del sgr['remote_group_node']
- sgr[cidr_field_name] = None
- elif ('remote_group_name' in sgr) and sgr['remote_group_name']:
- sgr[remote_group_field_name] = \
- client.get_id_from_resource(
- client.cosmo_get_named(
- SECURITY_GROUP_OPENSTACK_TYPE, sgr['remote_group_name']))
- del sgr['remote_group_name']
- sgr[cidr_field_name] = None
-
- ctx.logger.debug(
- "Security group rule after transformations: {0}".format(sgr))
- return sgr
-
-
-def _capabilities_of_node_named(node_name):
- result = None
- caps = ctx.capabilities.get_all()
- for node_id in caps:
- match = NODE_NAME_RE.match(node_id)
- if match:
- candidate_node_name = match.group(1)
- if candidate_node_name == node_name:
- if result:
- raise NonRecoverableError(
- "More than one node named '{0}' "
- "in capabilities".format(node_name))
- result = (node_id, caps[node_id])
- if not result:
- raise NonRecoverableError(
- "Could not find node named '{0}' "
- "in capabilities".format(node_name))
- return result
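_process_rule above expands the shorthand 'port' key into the client-specific min/max port fields and blanks the CIDR whenever a remote group is referenced. A hedged example with neutron-style field names (in the plugin those names are supplied by the neutron-specific caller, and the call assumes it runs inside an operation where ctx is available):

    rule = {'port': 22, 'remote_ip_prefix': '0.0.0.0/0'}
    sgr = _process_rule(rule, client,
                        sgr_default_values={'direction': 'ingress',
                                            'protocol': 'tcp'},
                        cidr_field_name='remote_ip_prefix',
                        remote_group_field_name='remote_group_id',
                        min_port_field_name='port_range_min',
                        max_port_field_name='port_range_max')
    # sgr == {'direction': 'ingress', 'protocol': 'tcp',
    #         'port_range_min': 22, 'port_range_max': 22,
    #         'remote_ip_prefix': '0.0.0.0/0'}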
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/__init__.py
+++ /dev/null
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/openstack_client_tests.py b/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/openstack_client_tests.py
deleted file mode 100644
index 27d443c2e4..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/openstack_client_tests.py
+++ /dev/null
@@ -1,849 +0,0 @@
-########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import os
-import unittest
-import tempfile
-import json
-import __builtin__ as builtins
-
-import mock
-from cloudify.exceptions import NonRecoverableError
-
-from cloudify.mocks import MockCloudifyContext
-import openstack_plugin_common as common
-
-
-class ConfigTests(unittest.TestCase):
-
- @mock.patch.dict('os.environ', clear=True)
- def test__build_config_from_env_variables_empty(self):
- cfg = common.Config._build_config_from_env_variables()
- self.assertEqual({}, cfg)
-
- @mock.patch.dict('os.environ', clear=True,
- OS_AUTH_URL='test_url')
- def test__build_config_from_env_variables_single(self):
- cfg = common.Config._build_config_from_env_variables()
- self.assertEqual({'auth_url': 'test_url'}, cfg)
-
- @mock.patch.dict('os.environ', clear=True,
- OS_AUTH_URL='test_url',
- OS_PASSWORD='pass',
- OS_REGION_NAME='region')
- def test__build_config_from_env_variables_multiple(self):
- cfg = common.Config._build_config_from_env_variables()
- self.assertEqual({
- 'auth_url': 'test_url',
- 'password': 'pass',
- 'region_name': 'region',
- }, cfg)
-
- @mock.patch.dict('os.environ', clear=True,
- OS_INVALID='invalid',
- PASSWORD='pass',
- os_region_name='region')
- def test__build_config_from_env_variables_all_ignored(self):
- cfg = common.Config._build_config_from_env_variables()
- self.assertEqual({}, cfg)
-
- @mock.patch.dict('os.environ', clear=True,
- OS_AUTH_URL='test_url',
- OS_PASSWORD='pass',
- OS_REGION_NAME='region',
- OS_INVALID='invalid',
- PASSWORD='pass',
- os_region_name='region')
- def test__build_config_from_env_variables_extract_valid(self):
- cfg = common.Config._build_config_from_env_variables()
- self.assertEqual({
- 'auth_url': 'test_url',
- 'password': 'pass',
- 'region_name': 'region',
- }, cfg)
-
- def test_update_config_empty_target(self):
- target = {}
- override = {'k1': 'u1'}
- result = override.copy()
-
- common.Config.update_config(target, override)
- self.assertEqual(result, target)
-
- def test_update_config_empty_override(self):
- target = {'k1': 'v1'}
- override = {}
- result = target.copy()
-
- common.Config.update_config(target, override)
- self.assertEqual(result, target)
-
- def test_update_config_disjoint_configs(self):
- target = {'k1': 'v1'}
- override = {'k2': 'u2'}
- result = target.copy()
- result.update(override)
-
- common.Config.update_config(target, override)
- self.assertEqual(result, target)
-
- def test_update_config_do_not_remove_empty_from_target(self):
- target = {'k1': ''}
- override = {}
- result = target.copy()
-
- common.Config.update_config(target, override)
- self.assertEqual(result, target)
-
- def test_update_config_no_empty_in_override(self):
- target = {'k1': 'v1', 'k2': 'v2'}
- override = {'k1': 'u2'}
- result = target.copy()
- result.update(override)
-
- common.Config.update_config(target, override)
- self.assertEqual(result, target)
-
- def test_update_config_all_empty_in_override(self):
- target = {'k1': '', 'k2': 'v2'}
- override = {'k1': '', 'k3': ''}
- result = target.copy()
-
- common.Config.update_config(target, override)
- self.assertEqual(result, target)
-
- def test_update_config_misc(self):
- target = {'k1': 'v1', 'k2': 'v2'}
- override = {'k1': '', 'k2': 'u2', 'k3': '', 'k4': 'u4'}
- result = {'k1': 'v1', 'k2': 'u2', 'k4': 'u4'}
-
- common.Config.update_config(target, override)
- self.assertEqual(result, target)
-
- @mock.patch.object(common.Config, 'update_config')
- @mock.patch.object(common.Config, '_build_config_from_env_variables',
- return_value={})
- @mock.patch.dict('os.environ', clear=True,
- values={common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR:
- '/this/should/not/exist.json'})
- def test_get_missing_static_config_missing_file(self, from_env, update):
- cfg = common.Config.get()
- self.assertEqual({}, cfg)
- from_env.assert_called_once_with()
- update.assert_not_called()
-
- @mock.patch.object(common.Config, 'update_config')
- @mock.patch.object(common.Config, '_build_config_from_env_variables',
- return_value={})
- def test_get_empty_static_config_present_file(self, from_env, update):
- file_cfg = {'k1': 'v1', 'k2': 'v2'}
- env_var = common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR
- file = tempfile.NamedTemporaryFile(delete=False)
- json.dump(file_cfg, file)
- file.close()
-
- with mock.patch.dict('os.environ', {env_var: file.name}, clear=True):
- common.Config.get()
-
- os.unlink(file.name)
- from_env.assert_called_once_with()
- update.assert_called_once_with({}, file_cfg)
-
- @mock.patch.object(common.Config, 'update_config')
- @mock.patch.object(common.Config, '_build_config_from_env_variables',
- return_value={'k1': 'v1'})
- def test_get_present_static_config_empty_file(self, from_env, update):
- file_cfg = {}
- env_var = common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR
- file = tempfile.NamedTemporaryFile(delete=False)
- json.dump(file_cfg, file)
- file.close()
-
- with mock.patch.dict('os.environ', {env_var: file.name}, clear=True):
- common.Config.get()
-
- os.unlink(file.name)
- from_env.assert_called_once_with()
- update.assert_called_once_with({'k1': 'v1'}, file_cfg)
-
- @mock.patch.object(common.Config, 'update_config')
- @mock.patch.object(common.Config, '_build_config_from_env_variables',
- return_value={'k1': 'v1'})
- @mock.patch.dict('os.environ', clear=True,
- values={common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR:
- '/this/should/not/exist.json'})
- def test_get_present_static_config_missing_file(self, from_env, update):
- cfg = common.Config.get()
- self.assertEqual({'k1': 'v1'}, cfg)
- from_env.assert_called_once_with()
- update.assert_not_called()
-
- @mock.patch.object(common.Config, 'update_config')
- @mock.patch.object(common.Config, '_build_config_from_env_variables',
- return_value={'k1': 'v1'})
- def test_get_all_present(self, from_env, update):
- file_cfg = {'k2': 'u2'}
- env_var = common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR
- file = tempfile.NamedTemporaryFile(delete=False)
- json.dump(file_cfg, file)
- file.close()
-
- with mock.patch.dict('os.environ', {env_var: file.name}, clear=True):
- common.Config.get()
-
- os.unlink(file.name)
- from_env.assert_called_once_with()
- update.assert_called_once_with({'k1': 'v1'}, file_cfg)
-
-
-class OpenstackClientTests(unittest.TestCase):
-
- def test__merge_custom_configuration_no_custom_cfg(self):
- cfg = {'k1': 'v1'}
- new = common.OpenStackClient._merge_custom_configuration(cfg, "dummy")
- self.assertEqual(cfg, new)
-
- def test__merge_custom_configuration_client_present(self):
- cfg = {
- 'k1': 'v1',
- 'k2': 'v2',
- 'custom_configuration': {
- 'dummy': {
- 'k2': 'u2',
- 'k3': 'u3'
- }
- }
- }
- result = {
- 'k1': 'v1',
- 'k2': 'u2',
- 'k3': 'u3'
- }
- bak = cfg.copy()
- new = common.OpenStackClient._merge_custom_configuration(cfg, "dummy")
- self.assertEqual(result, new)
- self.assertEqual(cfg, bak)
-
- def test__merge_custom_configuration_client_missing(self):
- cfg = {
- 'k1': 'v1',
- 'k2': 'v2',
- 'custom_configuration': {
- 'dummy': {
- 'k2': 'u2',
- 'k3': 'u3'
- }
- }
- }
- result = {
- 'k1': 'v1',
- 'k2': 'v2'
- }
- bak = cfg.copy()
- new = common.OpenStackClient._merge_custom_configuration(cfg, "baddy")
- self.assertEqual(result, new)
- self.assertEqual(cfg, bak)
-
- def test__merge_custom_configuration_multi_client(self):
- cfg = {
- 'k1': 'v1',
- 'k2': 'v2',
- 'custom_configuration': {
- 'dummy': {
- 'k2': 'u2',
- 'k3': 'u3'
- },
- 'bummy': {
- 'k1': 'z1'
- }
- }
- }
- result = {
- 'k1': 'z1',
- 'k2': 'v2',
- }
- bak = cfg.copy()
- new = common.OpenStackClient._merge_custom_configuration(cfg, "bummy")
- self.assertEqual(result, new)
- self.assertEqual(cfg, bak)
-
- @mock.patch.object(common, 'ctx')
- def test__merge_custom_configuration_nova_url(self, mock_ctx):
- cfg = {
- 'nova_url': 'gopher://nova',
- }
- bak = cfg.copy()
-
- self.assertEqual(
- common.OpenStackClient._merge_custom_configuration(
- cfg, 'nova_client'),
- {'endpoint_override': 'gopher://nova'},
- )
- self.assertEqual(
- common.OpenStackClient._merge_custom_configuration(
- cfg, 'dummy'),
- {},
- )
- self.assertEqual(cfg, bak)
- mock_ctx.logger.warn.assert_has_calls([
- mock.call(
- "'nova_url' property is deprecated. Use `custom_configuration."
- "nova_client.endpoint_override` instead."),
- mock.call(
- "'nova_url' property is deprecated. Use `custom_configuration."
- "nova_client.endpoint_override` instead."),
- ])
-
- @mock.patch('keystoneauth1.session.Session')
- def test___init___multi_region(self, m_session):
- mock_client_class = mock.MagicMock()
-
- cfg = {
- 'auth_url': 'test-auth_url/v3',
- 'region': 'test-region',
- }
-
- with mock.patch.object(
- builtins, 'open',
- mock.mock_open(
- read_data="""
- {
- "region": "region from file",
- "other": "this one should get through"
- }
- """
- ),
- create=True,
- ):
- common.OpenStackClient('fred', mock_client_class, cfg)
-
- mock_client_class.assert_called_once_with(
- region_name='test-region',
- other='this one should get through',
- session=m_session.return_value,
- )
-
- def test__validate_auth_params_missing(self):
- with self.assertRaises(NonRecoverableError):
- common.OpenStackClient._validate_auth_params({})
-
- def test__validate_auth_params_too_much(self):
- with self.assertRaises(NonRecoverableError):
- common.OpenStackClient._validate_auth_params({
- 'auth_url': 'url',
- 'password': 'pass',
- 'username': 'user',
- 'tenant_name': 'tenant',
- 'project_id': 'project_test',
- })
-
- def test__validate_auth_params_v2(self):
- common.OpenStackClient._validate_auth_params({
- 'auth_url': 'url',
- 'password': 'pass',
- 'username': 'user',
- 'tenant_name': 'tenant',
- })
-
- def test__validate_auth_params_v3(self):
- common.OpenStackClient._validate_auth_params({
- 'auth_url': 'url',
- 'password': 'pass',
- 'username': 'user',
- 'project_id': 'project_test',
- 'user_domain_name': 'user_domain',
- })
-
- def test__validate_auth_params_v3_mod(self):
- common.OpenStackClient._validate_auth_params({
- 'auth_url': 'url',
- 'password': 'pass',
- 'username': 'user',
- 'user_domain_name': 'user_domain',
- 'project_name': 'project_test_name',
- 'project_domain_name': 'project_domain',
- })
-
- def test__validate_auth_params_skip_insecure(self):
- common.OpenStackClient._validate_auth_params({
- 'auth_url': 'url',
- 'password': 'pass',
- 'username': 'user',
- 'user_domain_name': 'user_domain',
- 'project_name': 'project_test_name',
- 'project_domain_name': 'project_domain',
- 'insecure': True
- })
-
- def test__split_config(self):
- auth = {'auth_url': 'url', 'password': 'pass'}
- misc = {'misc1': 'val1', 'misc2': 'val2'}
-        # avoid shadowing the built-in all()
-        full_cfg = dict(auth)
-        full_cfg.update(misc)
-
-        a, m = common.OpenStackClient._split_config(full_cfg)
-
- self.assertEqual(auth, a)
- self.assertEqual(misc, m)
-
- @mock.patch.object(common, 'loading')
- @mock.patch.object(common, 'session')
- def test__authenticate_secure(self, mock_session, mock_loading):
- auth_params = {'k1': 'v1'}
- common.OpenStackClient._authenticate(auth_params)
- loader = mock_loading.get_plugin_loader.return_value
- loader.load_from_options.assert_called_once_with(k1='v1')
- auth = loader.load_from_options.return_value
- mock_session.Session.assert_called_once_with(auth=auth, verify=True)
-
- @mock.patch.object(common, 'loading')
- @mock.patch.object(common, 'session')
- def test__authenticate_secure_explicit(self, mock_session, mock_loading):
- auth_params = {'k1': 'v1', 'insecure': False}
- common.OpenStackClient._authenticate(auth_params)
- loader = mock_loading.get_plugin_loader.return_value
- loader.load_from_options.assert_called_once_with(k1='v1')
- auth = loader.load_from_options.return_value
- mock_session.Session.assert_called_once_with(auth=auth, verify=True)
-
- @mock.patch.object(common, 'loading')
- @mock.patch.object(common, 'session')
- def test__authenticate_insecure(self, mock_session, mock_loading):
- auth_params = {'k1': 'v1', 'insecure': True}
- common.OpenStackClient._authenticate(auth_params)
- loader = mock_loading.get_plugin_loader.return_value
- loader.load_from_options.assert_called_once_with(k1='v1')
- auth = loader.load_from_options.return_value
- mock_session.Session.assert_called_once_with(auth=auth, verify=False)
-
- @mock.patch.object(common, 'loading')
- @mock.patch.object(common, 'session')
- def test__authenticate_secure_misc(self, mock_session, mock_loading):
- params = {'k1': 'v1'}
- tests = ('', 'a', [], {}, set(), 4, 0, -1, 3.14, 0.0, None)
- for test in tests:
- auth_params = params.copy()
- auth_params['insecure'] = test
-
- common.OpenStackClient._authenticate(auth_params)
- loader = mock_loading.get_plugin_loader.return_value
- loader.load_from_options.assert_called_with(**params)
- auth = loader.load_from_options.return_value
- mock_session.Session.assert_called_with(auth=auth, verify=True)
-
- @mock.patch.object(common, 'cinder_client')
- def test_cinder_client_get_name_from_resource(self, cc_mock):
- ccws = common.CinderClientWithSugar()
- mock_volume = mock.Mock()
-
- self.assertIs(
- mock_volume.name,
- ccws.get_name_from_resource(mock_volume))
-
-
-class ClientsConfigTest(unittest.TestCase):
-
- def setUp(self):
- file = tempfile.NamedTemporaryFile(delete=False)
- json.dump(self.get_file_cfg(), file)
- file.close()
- self.addCleanup(os.unlink, file.name)
-
- env_cfg = self.get_env_cfg()
- env_cfg[common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR] = file.name
- mock.patch.dict('os.environ', env_cfg, clear=True).start()
-
- self.loading = mock.patch.object(common, 'loading').start()
- self.session = mock.patch.object(common, 'session').start()
- self.nova = mock.patch.object(common, 'nova_client').start()
- self.neutron = mock.patch.object(common, 'neutron_client').start()
- self.cinder = mock.patch.object(common, 'cinder_client').start()
- self.addCleanup(mock.patch.stopall)
-
- self.loader = self.loading.get_plugin_loader.return_value
- self.auth = self.loader.load_from_options.return_value
-
-
-class CustomConfigFromInputs(ClientsConfigTest):
-
- def get_file_cfg(self):
- return {
- 'username': 'file-username',
- 'password': 'file-password',
- 'tenant_name': 'file-tenant-name',
- 'custom_configuration': {
- 'nova_client': {
- 'username': 'custom-username',
- 'password': 'custom-password',
- 'tenant_name': 'custom-tenant-name'
- },
- }
- }
-
- def get_inputs_cfg(self):
- return {
- 'auth_url': 'envar-auth-url',
- 'username': 'inputs-username',
- 'custom_configuration': {
- 'neutron_client': {
- 'password': 'inputs-custom-password'
- },
- 'cinder_client': {
- 'password': 'inputs-custom-password',
- 'auth_url': 'inputs-custom-auth-url',
- 'extra_key': 'extra-value'
- },
- }
- }
-
- def get_env_cfg(self):
- return {
- 'OS_USERNAME': 'envar-username',
- 'OS_PASSWORD': 'envar-password',
- 'OS_TENANT_NAME': 'envar-tenant-name',
- 'OS_AUTH_URL': 'envar-auth-url',
-            # OPENSTACK_CONFIG_PATH_ENV_VAR is injected by setUp() with the
-            # temp file's name; `file` is not defined in this method's scope.
- }
-
- def test_nova(self):
- common.NovaClientWithSugar(config=self.get_inputs_cfg())
- self.loader.load_from_options.assert_called_once_with(
- username='inputs-username',
- password='file-password',
- tenant_name='file-tenant-name',
- auth_url='envar-auth-url'
- )
- self.session.Session.assert_called_with(auth=self.auth, verify=True)
- self.nova.Client.assert_called_once_with(
- '2', session=self.session.Session.return_value)
-
- def test_neutron(self):
- common.NeutronClientWithSugar(config=self.get_inputs_cfg())
- self.loader.load_from_options.assert_called_once_with(
- username='inputs-username',
- password='inputs-custom-password',
- tenant_name='file-tenant-name',
- auth_url='envar-auth-url'
- )
- self.session.Session.assert_called_with(auth=self.auth, verify=True)
- self.neutron.Client.assert_called_once_with(
- session=self.session.Session.return_value)
-
- def test_cinder(self):
- common.CinderClientWithSugar(config=self.get_inputs_cfg())
- self.loader.load_from_options.assert_called_once_with(
- username='inputs-username',
- password='inputs-custom-password',
- tenant_name='file-tenant-name',
- auth_url='inputs-custom-auth-url'
- )
- self.session.Session.assert_called_with(auth=self.auth, verify=True)
- self.cinder.Client.assert_called_once_with(
- '2', session=self.session.Session.return_value,
- extra_key='extra-value')
-
-
-class CustomConfigFromFile(ClientsConfigTest):
-
- def get_file_cfg(self):
- return {
- 'username': 'file-username',
- 'password': 'file-password',
- 'tenant_name': 'file-tenant-name',
- 'custom_configuration': {
- 'nova_client': {
- 'username': 'custom-username',
- 'password': 'custom-password',
- 'tenant_name': 'custom-tenant-name'
- },
- }
- }
-
- def get_inputs_cfg(self):
- return {
- 'auth_url': 'envar-auth-url',
- 'username': 'inputs-username',
- }
-
- def get_env_cfg(self):
- return {
- 'OS_USERNAME': 'envar-username',
- 'OS_PASSWORD': 'envar-password',
- 'OS_TENANT_NAME': 'envar-tenant-name',
- 'OS_AUTH_URL': 'envar-auth-url',
- common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR: file.name
- }
-
- def test_nova(self):
- common.NovaClientWithSugar(config=self.get_inputs_cfg())
- self.loader.load_from_options.assert_called_once_with(
- username='custom-username',
- password='custom-password',
- tenant_name='custom-tenant-name',
- auth_url='envar-auth-url'
- )
- self.session.Session.assert_called_with(auth=self.auth, verify=True)
- self.nova.Client.assert_called_once_with(
- '2', session=self.session.Session.return_value)
-
- def test_neutron(self):
- common.NeutronClientWithSugar(config=self.get_inputs_cfg())
- self.loader.load_from_options.assert_called_once_with(
- username='inputs-username',
- password='file-password',
- tenant_name='file-tenant-name',
- auth_url='envar-auth-url'
- )
- self.session.Session.assert_called_with(auth=self.auth, verify=True)
- self.neutron.Client.assert_called_once_with(
- session=self.session.Session.return_value)
-
- def test_cinder(self):
- common.CinderClientWithSugar(config=self.get_inputs_cfg())
- self.loader.load_from_options.assert_called_once_with(
- username='inputs-username',
- password='file-password',
- tenant_name='file-tenant-name',
- auth_url='envar-auth-url'
- )
- self.session.Session.assert_called_with(auth=self.auth, verify=True)
- self.cinder.Client.assert_called_once_with(
- '2', session=self.session.Session.return_value)
-
-
-class PutClientInKwTests(unittest.TestCase):
-
- def test_override_prop_empty_ctx(self):
- props = {}
- ctx = MockCloudifyContext(node_id='a20846', properties=props)
- kwargs = {
- 'ctx': ctx,
- 'openstack_config': {
- 'p1': 'v1'
- }
- }
- expected_cfg = kwargs['openstack_config']
-
- client_class = mock.MagicMock()
- common._put_client_in_kw('mock_client', client_class, kwargs)
- client_class.assert_called_once_with(config=expected_cfg)
-
- def test_override_prop_nonempty_ctx(self):
- props = {
- 'openstack_config': {
- 'p1': 'u1',
- 'p2': 'u2'
- }
- }
- props_copy = props.copy()
- ctx = MockCloudifyContext(node_id='a20846', properties=props)
- kwargs = {
- 'ctx': ctx,
- 'openstack_config': {
- 'p1': 'v1',
- 'p3': 'v3'
- }
- }
- expected_cfg = {
- 'p1': 'v1',
- 'p2': 'u2',
- 'p3': 'v3'
- }
-
- client_class = mock.MagicMock()
- common._put_client_in_kw('mock_client', client_class, kwargs)
- client_class.assert_called_once_with(config=expected_cfg)
- # Making sure that _put_client_in_kw will not modify
- # 'openstack_config' property of a node.
- self.assertEqual(props_copy, ctx.node.properties)
-
- def test_override_runtime_prop(self):
- props = {
- 'openstack_config': {
- 'p1': 'u1',
- 'p2': 'u2'
- }
- }
- runtime_props = {
- 'openstack_config': {
- 'p1': 'u3'
- }
- }
- props_copy = props.copy()
- runtime_props_copy = runtime_props.copy()
- ctx = MockCloudifyContext(node_id='a20847', properties=props,
- runtime_properties=runtime_props)
- kwargs = {
- 'ctx': ctx
- }
- expected_cfg = {
- 'p1': 'u3',
- 'p2': 'u2'
- }
- client_class = mock.MagicMock()
- common._put_client_in_kw('mock_client', client_class, kwargs)
- client_class.assert_called_once_with(config=expected_cfg)
- self.assertEqual(props_copy, ctx.node.properties)
- self.assertEqual(runtime_props_copy, ctx.instance.runtime_properties)
-
-
-class ResourceQuotaTests(unittest.TestCase):
-
- def _test_quota_validation(self, amount, quota, failure_expected):
- ctx = MockCloudifyContext(node_id='node_id', properties={})
- client = mock.MagicMock()
-
- def mock_cosmo_list(_):
- return [x for x in range(0, amount)]
- client.cosmo_list = mock_cosmo_list
-
- def mock_get_quota(_):
- return quota
- client.get_quota = mock_get_quota
-
- if failure_expected:
- self.assertRaisesRegexp(
- NonRecoverableError,
- 'cannot be created due to quota limitations',
- common.validate_resource,
- ctx=ctx, sugared_client=client,
- openstack_type='openstack_type')
- else:
- common.validate_resource(
- ctx=ctx, sugared_client=client,
- openstack_type='openstack_type')
-
- def test_equals_quotas(self):
- self._test_quota_validation(3, 3, True)
-
- def test_exceeded_quota(self):
- self._test_quota_validation(5, 3, True)
-
- def test_infinite_quota(self):
- self._test_quota_validation(5, -1, False)
-
-
-class UseExternalResourceTests(unittest.TestCase):
-
- def _test_use_external_resource(self,
- is_external,
- create_if_missing,
- exists):
- properties = {'create_if_missing': create_if_missing,
- 'use_external_resource': is_external,
- 'resource_id': 'resource_id'}
- client_mock = mock.MagicMock()
- os_type = 'test'
-
- def _raise_error(*_):
- raise NonRecoverableError('Error')
-
- def _return_something(*_):
- return mock.MagicMock()
-
- return_value = _return_something if exists else _raise_error
- if exists:
- properties.update({'resource_id': 'rid'})
-
- node_context = MockCloudifyContext(node_id='a20847',
- properties=properties)
- with mock.patch(
- 'openstack_plugin_common._get_resource_by_name_or_id_from_ctx',
- new=return_value):
- return common.use_external_resource(node_context,
- client_mock, os_type)
-
- def test_use_existing_resource(self):
- self.assertIsNotNone(self._test_use_external_resource(True, True,
- True))
- self.assertIsNotNone(self._test_use_external_resource(True, False,
- True))
-
- def test_create_resource(self):
- self.assertIsNone(self._test_use_external_resource(False, True, False))
- self.assertIsNone(self._test_use_external_resource(False, False,
- False))
- self.assertIsNone(self._test_use_external_resource(True, True, False))
-
- def test_raise_error(self):
-        # The case where the resource exists but should not is covered by
-        # resource validation, so that scenario is not tested here.
- self.assertRaises(NonRecoverableError,
- self._test_use_external_resource,
- is_external=True,
- create_if_missing=False,
- exists=False)
-
-
-class ValidateResourceTests(unittest.TestCase):
-
- def _test_validate_resource(self,
- is_external,
- create_if_missing,
- exists,
- client_mock_provided=None):
- properties = {'create_if_missing': create_if_missing,
- 'use_external_resource': is_external,
- 'resource_id': 'resource_id'}
- client_mock = client_mock_provided or mock.MagicMock()
- os_type = 'test'
-
- def _raise_error(*_):
- raise NonRecoverableError('Error')
-
- def _return_something(*_):
- return mock.MagicMock()
- return_value = _return_something if exists else _raise_error
- if exists:
- properties.update({'resource_id': 'rid'})
-
- node_context = MockCloudifyContext(node_id='a20847',
- properties=properties)
- with mock.patch(
- 'openstack_plugin_common._get_resource_by_name_or_id_from_ctx',
- new=return_value):
- return common.validate_resource(node_context, client_mock, os_type)
-
- def test_use_existing_resource(self):
- self._test_validate_resource(True, True, True)
- self._test_validate_resource(True, False, True)
-
- def test_create_resource(self):
- client_mock = mock.MagicMock()
- client_mock.cosmo_list.return_value = ['a', 'b', 'c']
- client_mock.get_quota.return_value = 5
- self._test_validate_resource(False, True, False, client_mock)
- self._test_validate_resource(False, False, False, client_mock)
- self._test_validate_resource(True, True, False, client_mock)
-
- def test_raise_error(self):
-        # The case where the resource exists but should not is covered by
-        # resource validation, so that scenario is not tested here.
- self.assertRaises(NonRecoverableError,
- self._test_validate_resource,
- is_external=True,
- create_if_missing=False,
- exists=False)
-
- def test_raise_quota_error(self):
- client_mock = mock.MagicMock()
- client_mock.cosmo_list.return_value = ['a', 'b', 'c']
- client_mock.get_quota.return_value = 3
- self.assertRaises(NonRecoverableError,
- self._test_validate_resource,
- is_external=True,
- create_if_missing=True,
- exists=False,
- client_mock_provided=client_mock)
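
The ClientsConfigTest subclasses above encode the configuration precedence the plugin relies on: environment variables are overridden by the JSON file named by OPENSTACK_CONFIG_PATH_ENV_VAR, which is in turn overridden by operation inputs, and a per-client custom_configuration section wins over all three. Below is only a minimal sketch of that ordering; the resolve_client_config helper name and its handling of empty values are hypothetical, not the plugin's actual code.

    # Hypothetical helper illustrating the precedence exercised by
    # CustomConfigFromInputs and CustomConfigFromFile: env < file < inputs,
    # with the per-client 'custom_configuration' block overriding all three.
    def resolve_client_config(env_cfg, file_cfg, inputs_cfg, client_name):
        merged = {}
        for layer in (env_cfg, file_cfg, inputs_cfg):
            # later layers win, but empty values never erase earlier ones
            merged.update({k: v for k, v in layer.items()
                           if v not in ('', None)
                           and k != 'custom_configuration'})
        for layer in (file_cfg, inputs_cfg):
            custom = layer.get('custom_configuration', {})
            merged.update(custom.get(client_name, {}))
        return merged

    # e.g. username comes from the inputs, password from the file, and
    # auth_url from the cinder-specific custom_configuration block:
    resolve_client_config(
        {'username': 'envar-username', 'auth_url': 'envar-auth-url'},
        {'username': 'file-username', 'password': 'file-password'},
        {'username': 'inputs-username',
         'custom_configuration': {
             'cinder_client': {'auth_url': 'inputs-custom-auth-url'}}},
        'cinder_client')
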
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/provider-context.json b/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/provider-context.json
deleted file mode 100644
index f7e20e4ef5..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/provider-context.json
+++ /dev/null
@@ -1,78 +0,0 @@
-{
- "context": {
- "resources": {
- "management_keypair": {
- "name": "p2_cloudify-manager-kp-ilya",
- "id": "p2_cloudify-manager-kp-ilya",
- "type": "keypair",
- "external_resource": true
- },
- "router": {
- "name": "p2_cloudify-router",
- "id": "856f9fb8-6676-4b99-b64d-b76874b30abf",
- "type": "router",
- "external_resource": true
- },
- "subnet": {
- "name": "p2_cloudify-admin-network-subnet",
- "id": "dd193491-d728-4e3e-8199-27eec0ba18e4",
- "type": "subnet",
- "external_resource": true
- },
- "int_network": {
- "name": "p2_cloudify-admin-network",
- "id": "27ef2770-5219-4bb1-81d4-14ed450c5181",
- "type": "network",
- "external_resource": true
- },
- "management_server": {
- "name": "p2_cfy-mgr-ilya-2014-06-01-11:59",
- "id": "be9991da-9c34-4f7c-9c33-5e04ad2d5b3e",
- "type": "server",
- "external_resource": false
- },
- "agents_security_group": {
- "name": "p2_cloudify-sg-agents",
- "id": "d52280aa-0e79-4697-bd08-baf3f84e2a10",
- "type": "neutron security group",
- "external_resource": true
- },
- "agents_keypair": {
- "name": "p2_cloudify-agents-kp-ilya",
- "id": "p2_cloudify-agents-kp-ilya",
- "type": "keypair",
- "external_resource": true
- },
- "management_security_group": {
- "name": "p2_cloudify-sg-management",
- "id": "5862e0d2-8f28-472e-936b-d2da9cb935b3",
- "type": "neutron security group",
- "external_resource": true
- },
- "floating_ip": {
- "external_resource": true,
- "id": "None",
- "type": "floating ip",
- "ip": "CENSORED"
- },
- "ext_network": {
- "name": "Ext-Net",
- "id": "7da74520-9d5e-427b-a508-213c84e69616",
- "type": "network",
- "external_resource": true
- }
- },
- "cloudify": {
- "resources_prefix": "p2_",
- "cloudify_agent": {
- "user": "ubuntu",
- "agent_key_path": "/PATH/CENSORED/p2_cloudify-agents-kp-ilya.pem",
- "min_workers": 2,
- "max_workers": 5,
- "remote_execution_port": 22
- }
- }
- },
- "name": "cloudify_openstack"
-}
-
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/test.py b/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/test.py
deleted file mode 100644
index 13099292ca..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/openstack_plugin_common/tests/test.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import json
-import os
-
-from cloudify.context import BootstrapContext
-
-from cloudify.mocks import MockCloudifyContext
-
-
-RETRY_AFTER = 1
-# Time during which no retry could possibly happen.
-NO_POSSIBLE_RETRY_TIME = RETRY_AFTER / 2.0
-
-BOOTSTRAP_CONTEXTS_WITHOUT_PREFIX = (
- {
- },
- {
- 'resources_prefix': ''
- },
- {
- 'resources_prefix': None
- },
-)
-
-
-def set_mock_provider_context(ctx, provider_context):
-
- def mock_provider_context(provider_name_unused):
- return provider_context
-
- ctx.get_provider_context = mock_provider_context
-
-
-def create_mock_ctx_with_provider_info(*args, **kw):
- cur_dir = os.path.dirname(os.path.realpath(__file__))
- full_file_name = os.path.join(cur_dir, 'provider-context.json')
- with open(full_file_name) as f:
- provider_context = json.loads(f.read())['context']
- kw['provider_context'] = provider_context
- kw['bootstrap_context'] = BootstrapContext(provider_context['cloudify'])
- return MockCloudifyContext(*args, **kw)
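
create_mock_ctx_with_provider_info above wires the provider-context.json fixture into a MockCloudifyContext, making both the provider context and the bootstrap context available to a test. A hedged usage sketch follows; the node_id and properties are illustrative values, not taken from the deleted tests.

    # Illustrative only: build a mock ctx whose provider context and bootstrap
    # context come from the provider-context.json fixture shown above.
    from openstack_plugin_common.tests.test import (
        create_mock_ctx_with_provider_info)

    ctx = create_mock_ctx_with_provider_info(
        node_id='test-node-id',              # hypothetical value
        properties={'openstack_config': {}})
    # the fixture's 'p2_' resources_prefix and its cloudify_agent settings are
    # then reachable through ctx's bootstrap context, as the plugin code expects
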
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/plugin.yaml b/aria/multivim-plugin/src/main/python/multivim-plugin/plugin.yaml
deleted file mode 100644
index 6df0764e94..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/plugin.yaml
+++ /dev/null
@@ -1,1178 +0,0 @@
-#
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-
-topology_template:
- policies:
- onap-multivim-plugin:
- description: >-
- multivim plugin executes operations.
- type: aria.Plugin
- properties:
- version: 2.0.1
-
-
-data_types:
-
- onap.multivim.datatypes.Config:
- description: >-
- multivim configuration
- properties:
- username:
- type: string
- password:
- type: string
- tenant_name:
- type: string
- auth_url:
- type: string
- region:
- type: string
- required: false
- nova_url:
- type: string
- required: false
- neutron_url:
- type: string
- required: false
-
- onap.multivim.datatypes.Rules:
- description: >-
- multivim security group rules
- properties:
- remote_ip_prefix:
- type: string
- default: 0.0.0.0/0
- port:
- type: integer
- default:
-
- # source: https://developer.multivim.org/api-ref/compute/
-
- onap.multivim.datatypes.Server:
- description: >-
- multivim Server args.
- properties:
- security_groups:
- type: list
- entry_schema: string
- required: false
- availability_zone:
- type: string
- required: false
- userdata:
- type: string
- required: false
- metadata:
- type: map
- entry_schema: string
- required: false
-
- onap.multivim.datatypes.Keypair:
- description: >-
- multivim keypair args.
- properties:
- public_key:
- type: string
- required: false
- type:
- type: string
- required: false
- user_id:
- type: string
- required: false
-
- # source: https://developer.multivim.org/api-ref/block-storage/v2/index.html
-
- onap.multivim.datatypes.Volume:
- description: >-
- multivim volume args.
- properties:
- size:
- type: integer
- required: false
- description:
- type: string
- required: false
- availability_zone:
- type: string
- required: false
- consistencygroup_id:
- type: string
- required: false
- volume_type:
- type: string
- required: false
- snapshot_id:
- type: string
- required: false
- source_replica:
- type: string
- required: false
- tenant_id:
- type: string
- required: false
-
- # source: https://developer.multivim.org/api-ref/image/
-
- onap.multivim.datatypes.Image:
- description: >-
- multivim image args.
- properties:
- id:
- type: string
- required: false
- min_disk:
- type: integer
- required: false
- min_ram:
- type: integer
- required: false
- name:
- type: string
- required: false
- protected:
- type: boolean
- required: false
- tags:
- type: list
- entry_schema: string
- required: false
- visibility:
- type: string
- required: false
-
- # source: https://developer.multivim.org/api-ref/identity/v3/
-
- onap.multivim.datatypes.Project:
- description: >-
-      multivim project args.
- properties:
- is_domain:
- type: boolean
- required: false
- description:
- type: string
- required: false
- domain_id:
- type: string
- required: false
- name:
- type: string
- required: false
- enabled:
- type: boolean
- required: false
- parent_id:
- type: string
- required: false
-
- # source: https://developer.multivim.org/api-ref/networking/v2/index.html
-
- onap.multivim.datatypes.Subnet:
- description: >-
- multivim subnet args.
- properties:
- network_id:
- type: string
- required: false
- ip_version:
- type: integer
- required: false
- default: 4
- cidr:
- type: string
- required: false
- gateway_ip:
- type: string
- required: false
- dns_nameservers:
- type: list
- entry_schema: string
- required: false
- enable_dhcp:
- type: boolean
- required: false
- tenant_id:
- type: string
- required: false
-
- onap.multivim.datatypes.Port:
- description: >-
- multivim port args
- properties:
- network_id:
- type: string
- required: false
- admin_state_up:
- type: boolean
- required: false
- status:
- type: string
- required: false
- mac_address:
- type: string
- required: false
- device_id:
- type: string
- required: false
- device_owner:
- type: string
- required: false
- tenant_id:
- type: string
- required: false
-
- onap.multivim.datatypes.Network:
- description: >-
- multivim network args
- properties:
- admin_state_up:
- type: boolean
- required: false
- status:
- type: string
- required: false
- subnets:
- type: list
- entry_schema: string
- required: false
- shared:
- type: boolean
- required: false
- tenant_id:
- type: string
- required: false
-
- onap.multivim.datatypes.SecurityGroup:
- description: >-
-      multivim security group args
- properties:
- admin_state_up:
- type: boolean
- required: false
- port_security_enabled:
- type: boolean
- required: false
- project_id:
- type: string
- required: false
- qos_policy_id:
- type: string
- required: false
- segments:
- type: list
- entry_schema: string
- required: false
- shared:
- type: boolean
- required: false
- vlan_transparent:
- type: boolean
- required: false
- tenant_id:
- type: string
- required: false
-
- onap.multivim.datatypes.Router:
- description: >-
-      multivim router args
- properties:
- bgpvpn_id:
- type: string
- required: false
- router_id:
- type: string
- required: false
-
- onap.multivim.datatypes.FloatingIP:
- description: >-
-      multivim floating IP args
- properties:
- tenant_id:
- type: string
- required: false
- project_id:
- type: string
- required: false
- floating_network_id:
- type: string
- required: false
- floating_network_name:
- type: string
- required: false
- fixed_ip_address:
- type: string
- required: false
- floating_ip_address:
- type: string
- required: false
- port_id:
- type: string
- required: false
- subnet_id:
- type: string
- required: false
-
-
-interface_types:
-
- onap.multivim.interfaces.validation:
- derived_from: tosca.interfaces.Root
- creation:
- description: >-
- creation operation for the multivim validation interface
- deletion:
- description: >-
- deletion operation for the multivim validation interface
-
-
-node_types:
-
- onap.multivim.nodes.Server:
- derived_from: tosca.nodes.Compute
- properties:
- server:
- default: {}
- type: onap.multivim.datatypes.Server
- required: false
- ip:
- default:
- type: string
- os_family:
- description: >-
- Property specifying what type of operating system family
- this compute node will run.
- default: linux
- type: string
- use_external_resource:
- type: boolean
- default: false
- description: >-
- a boolean for setting whether to create the resource or use an existing one.
- See the using existing resources section.
- create_if_missing:
- default: false
- type: boolean
- description: >-
- If use_external_resource is ``true`` and the resource is missing,
- create it instead of failing.
- resource_id:
- default: ''
- type: string
- description: >-
- name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
- image:
- default: ''
- type: string
- description: >-
- The image for the server.
- May receive either the ID or the name of the image.
- note: This property is currently optional for backwards compatibility,
- but will be modified to become a required property in future versions
- (Default: '').
- flavor:
- default: ''
- type: string
- description: >-
- The flavor for the server.
- May receive either the ID or the name of the flavor.
- note: This property is currently optional for backwards compatibility,
- but will be modified to become a required property in future versions
- (Default: '').
- use_password:
- default: false
- type: boolean
- description: >-
- A boolean describing whether this server image supports user-password authentication.
- Images that do should post the administrator user's password to the Openstack metadata service (e.g. via cloudbase);
- The password would then be retrieved by the plugin,
- decrypted using the server's keypair and then saved in the server's runtime properties.
- management_network_name:
- type: string
- description: >-
- The current implementation of the multivim plugin requires this field. The value of
- this field should be set to the multivim name of a network this server is attached to.
- multivim_config:
- type: onap.multivim.datatypes.Config
- required: false
- description: >-
-          see Openstack Configuration
- interfaces:
- Standard:
- create:
- implementation: onap-multivim-plugin > nova_plugin.server.create
- inputs:
- args:
- required: false
- default: {}
- type: onap.multivim.datatypes.Server
- start:
- implementation: onap-multivim-plugin > nova_plugin.server.start
- inputs:
- start_retry_interval:
- default: 30
- type: integer
- private_key_path:
- type: string
- default: ''
- required: true
- stop: onap-multivim-plugin > nova_plugin.server.stop
- delete: onap-multivim-plugin > nova_plugin.server.delete
- Validation:
- type: onap.multivim.interfaces.validation
- creation:
- implementation: onap-multivim-plugin > nova_plugin.server.creation_validation
- inputs:
- args:
- required: false
- default: {}
- type: onap.multivim.datatypes.Server
-
- requirements:
- - floating_ip:
- capability: tosca.capabilities.Node
- node: onap.multivim.nodes.FloatingIP
- relationship: onap.multivim.server_connected_to_floating_ip
- occurrences: [ 0, UNBOUNDED ]
- - security_group:
- capability: tosca.capabilities.Node
- node: onap.multivim.nodes.SecurityGroup
- relationship: onap.multivim.server_connected_to_security_group
- occurrences: [ 0, UNBOUNDED ]
- - port:
- capability: tosca.capabilities.Node
- node: onap.multivim.nodes.Port
- relationship: onap.multivim.server_connected_to_port
- occurrences: [ 0, UNBOUNDED ]
- - key_pair:
- capability: tosca.capabilities.Node
- node: onap.multivim.nodes.KeyPair
- relationship: onap.multivim.server_connected_to_keypair
- occurrences: [ 0, UNBOUNDED ]
- capabilities:
- multivim_container:
- type: Node
-
- onap.multivim.nodes.WindowsServer:
- derived_from: onap.multivim.nodes.Server
- properties:
- use_password:
- default: true
- type: boolean
- description: >-
- Default changed for derived type
- because Windows instances need a password for agent installation
- os_family:
- default: windows
- type: string
- description: >-
- (updates the os_family default as a convenience)
-
- onap.multivim.nodes.KeyPair:
- derived_from: tosca.nodes.Root
- properties:
- keypair:
- default: {}
- type: onap.multivim.datatypes.Keypair
- required: false
- description: >-
-          key-value keypair configuration.
- private_key_path:
- description: >
- the path (on the machine the plugin is running on) to
- where the private key should be stored. If
- use_external_resource is set to "true", the existing
- private key is expected to be at this path.
- type: string
- use_external_resource:
- type: boolean
- default: false
- description: >-
- a boolean describing whether this resource should be
- created or rather that it already exists on Openstack
- and should be used as-is.
- create_if_missing:
- default: false
- type: boolean
- description: >-
- If use_external_resource is ``true`` and the resource is missing,
- create it instead of failing.
- resource_id:
- default: ''
- type: string
- description: >-
- the name that will be given to the resource on Openstack (excluding optional prefix).
- If not provided, a default name will be given instead.
- If use_external_resource is set to "true", this exact
- value (without any prefixes applied) will be looked for
- as either the name or id of an existing keypair to be used.
- multivim_config:
- type: onap.multivim.datatypes.Config
- required: false
- interfaces:
- Standard:
- create:
- implementation: onap-multivim-plugin > nova_plugin.keypair.create
- inputs:
- args:
- required: false
- default: {}
- type: onap.multivim.datatypes.Keypair
-
- delete: onap-multivim-plugin > nova_plugin.keypair.delete
-
- Validation:
- type: onap.multivim.interfaces.validation
- creation: onap-multivim-plugin > nova_plugin.keypair.creation_validation
-
- capabilities:
- keypair:
- type: tosca.capabilities.Node
-
- onap.multivim.nodes.Subnet:
- derived_from: tosca.nodes.Root
- properties:
- subnet:
- type: onap.multivim.datatypes.Subnet
- required: false
- default:
- cidr: 172.16.0.0/16
- use_external_resource:
- type: boolean
- default: false
- description: >-
- a boolean for setting whether to create the resource or use an existing one.
- See the using existing resources section.
- create_if_missing:
- default: false
- type: boolean
- description: >-
- If use_external_resource is ``true`` and the resource is missing,
- create it instead of failing.
- resource_id:
- default: ''
- type: string
- description: >-
- name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
- multivim_config:
- type: onap.multivim.datatypes.Config
- required: false
- interfaces:
- Standard:
- create:
- implementation: onap-multivim-plugin > neutron_plugin.subnet.create
- inputs:
- args:
- required: false
- type: onap.multivim.datatypes.Subnet
- default:
- cidr: 172.16.0.0/16
- delete: onap-multivim-plugin > neutron_plugin.subnet.delete
- Validation:
- type: onap.multivim.interfaces.validation
- creation:
- implementation: onap-multivim-plugin > neutron_plugin.subnet.creation_validation
- inputs:
- args:
- type: onap.multivim.datatypes.Subnet
- required: false
- default:
- cidr: 172.16.0.0/16
-
- requirements:
-            # OPENSTACK_CONFIG_PATH_ENV_VAR is injected by setUp() with the
-            # temp file's name; `file` is not defined in this method's scope.
- capability: tosca.capabilities.Node
- node: onap.multivim.nodes.Router
- relationship: onap.multivim.subnet_connected_to_router
- occurrences: [ 0, UNBOUNDED ]
- - network:
- capability: tosca.capabilities.Node
- node: onap.multivim.nodes.Network
- capabilities:
- subnet:
- type: tosca.capabilities.Node
-
- onap.multivim.nodes.SecurityGroup:
- derived_from: tosca.nodes.Root
- properties:
- security_group:
- type: onap.multivim.datatypes.SecurityGroup
- required: false
- default: {}
- description:
- type: string
- default: ''
- description: >-
- SecurityGroup description.
- create_if_missing:
- default: false
- type: boolean
- description: >-
- If use_external_resource is ``true`` and the resource is missing,
- create it instead of failing.
- use_external_resource:
- type: boolean
- default: false
- description: >-
- a boolean for setting whether to create the resource or use an existing one.
- See the using existing resources section.
- resource_id:
- default: ''
- type: string
- description: >-
- name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
- multivim_config:
- type: onap.multivim.datatypes.Config
- required: false
- rules:
- default: []
- type: list
- entry_schema: onap.multivim.datatypes.Rules
- disable_default_egress_rules:
- default: false
- type: boolean
- description: >-
-          a flag for removing the default egress rules described at https://wiki.multivim.org/wiki/Neutron/SecurityGroups#Behavior. If not set to `true`, these rules will remain and will exist alongside any additional rules passed using the `rules` property.
- interfaces:
- Standard:
- create:
- implementation: onap-multivim-plugin > neutron_plugin.security_group.create
- inputs:
- args:
- type: onap.multivim.datatypes.SecurityGroup
- required: false
- default: {}
- delete: onap-multivim-plugin > neutron_plugin.security_group.delete
-
- Validation:
- type: onap.multivim.interfaces.validation
- creation: onap-multivim-plugin > neutron_plugin.security_group.creation_validation
-
- capabilities:
- security:
- type: tosca.capabilities.Node
-
- onap.multivim.nodes.Router:
- derived_from: tosca.nodes.Root
- properties:
- router:
- type: onap.multivim.datatypes.Router
- required: false
- default: {}
- external_network:
- default: ''
- type: string
- description: >-
- An external network name or ID.
- If given, the router will use this external network as a gateway.
- use_external_resource:
- type: boolean
- default: false
- description: >-
- a boolean for setting whether to create the resource or use an existing one.
- See the using existing resources section.
- create_if_missing:
- default: false
- type: boolean
- description: >-
- If use_external_resource is ``true`` and the resource is missing,
- create it instead of failing.
- resource_id:
- default: ''
- description: >-
- name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
- type: string
- multivim_config:
- type: onap.multivim.datatypes.Config
- required: false
-
- interfaces:
- Standard:
- create:
- implementation: onap-multivim-plugin > neutron_plugin.router.create
- inputs:
- args:
- default: {}
- type: onap.multivim.datatypes.Router
- required: false
- delete: onap-multivim-plugin > neutron_plugin.router.delete
- Validation:
- type: onap.multivim.interfaces.validation
- creation: onap-multivim-plugin > neutron_plugin.router.creation_validation
-
- capabilities:
- gateway:
- type: tosca.capabilities.Node
-
- onap.multivim.nodes.Port:
- derived_from: tosca.nodes.Root
- properties:
- port:
- type: onap.multivim.datatypes.Port
- required: false
- default: {}
- fixed_ip:
- default: ''
- type: string
- description: >-
- may be used to request a specific fixed IP for the port.
- If the IP is unavailable
- (either already taken or does not belong to a subnet the port is on)
- an error will be raised.
- use_external_resource:
- type: boolean
- default: false
- description: >-
- a boolean for setting whether to create the resource or use an existing one.
- See the using existing resources section.
- create_if_missing:
- default: false
- type: boolean
- description: >-
- If use_external_resource is ``true`` and the resource is missing,
- create it instead of failing.
- resource_id:
- default: ''
- type: string
- description: >-
- name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
- multivim_config:
- type: onap.multivim.datatypes.Config
- required: false
-
- interfaces:
- Standard:
- create:
- implementation: onap-multivim-plugin > neutron_plugin.port.create
- inputs:
- args:
- default: {}
- type: onap.multivim.datatypes.Port
- required: false
-
- delete: onap-multivim-plugin > neutron_plugin.port.delete
-
- Validation:
- type: onap.multivim.interfaces.validation
- creation: onap-multivim-plugin > neutron_plugin.port.creation_validation
-
- requirements:
- - security_group:
- capability: tosca.capabilities.Node
- node: onap.multivim.nodes.SecurityGroup
- relationship: onap.multivim.port_connected_to_security_group
- occurrences: [ 0, UNBOUNDED ]
- - floating_ip:
- capability: tosca.capabilities.Node
- node: onap.multivim.nodes.FloatingIP
- relationship: onap.multivim.port_connected_to_floating_ip
- occurrences: [ 0, UNBOUNDED ]
- - subnet:
- capability: tosca.capabilities.Node
- node: onap.multivim.nodes.Subnet
- relationship: onap.multivim.port_connected_to_subnet
- - network:
- capability: tosca.capabilities.Node
- node: onap.multivim.nodes.Network
- occurrences: [ 0, UNBOUNDED ]
- capabilities:
- entry_point:
- type: tosca.capabilities.Node
-
- onap.multivim.nodes.Network:
- derived_from: tosca.nodes.Root
- properties:
- network:
- type: onap.multivim.datatypes.Network
- required: false
- default: {}
- use_external_resource:
- type: boolean
- default: false
- description: >-
- a boolean for setting whether to create the resource or use an existing one.
- See the using existing resources section.
- create_if_missing:
- default: false
- type: boolean
- description: >-
- If use_external_resource is ``true`` and the resource is missing,
- create it instead of failing.
- resource_id:
- default: ''
- type: string
- description: >-
- name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
- multivim_config:
- type: onap.multivim.datatypes.Config
- required: false
- interfaces:
- Standard:
- create:
- implementation: onap-multivim-plugin > neutron_plugin.network.create
- inputs:
- args:
- default: {}
- type: onap.multivim.datatypes.Network
- required: false
-
- delete: onap-multivim-plugin > neutron_plugin.network.delete
-
- Validation:
- type: onap.multivim.interfaces.validation
- creation: onap-multivim-plugin > neutron_plugin.network.creation_validation
-
- capabilities:
- address_space:
- type: tosca.capabilities.Node
-
- onap.multivim.nodes.FloatingIP:
- derived_from: tosca.nodes.Root
- attributes:
- floating_ip_address:
- type: string
- properties:
- floatingip:
- type: onap.multivim.datatypes.FloatingIP
- required: false
- default: {}
- use_external_resource:
- type: boolean
- default: false
- description: >-
- a boolean for setting whether to create the resource or use an existing one.
- See the using existing resources section.
- create_if_missing:
- default: false
- type: boolean
- description: >-
- If use_external_resource is ``true`` and the resource is missing,
- create it instead of failing.
- resource_id:
-        default: ''
-        type: string
-        description: >-
-          IP address of an existing floating IP to use when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section); otherwise the name to give to the new resource. Defaults to '' (empty string).
- multivim_config:
- type: onap.multivim.datatypes.Config
- required: false
-
- interfaces:
- Standard:
- create:
- implementation: onap-multivim-plugin > neutron_plugin.floatingip.create
- inputs:
- args:
- default: {}
- type: onap.multivim.datatypes.FloatingIP
- required: false
-
- delete: onap-multivim-plugin > neutron_plugin.floatingip.delete
-
- Validation:
- type: onap.multivim.interfaces.validation
- creation: onap-multivim-plugin > neutron_plugin.floatingip.creation_validation
-
- capabilities:
- address:
- type: tosca.capabilities.Node
-
- onap.multivim.nodes.Volume:
- derived_from: tosca.nodes.Root
- properties:
- volume:
- default: {}
- type: onap.multivim.datatypes.Volume
- description: >-
- key-value volume configuration as described in http://developer.multivim.org/api-ref-blockstorage-v1.html#volumes-v1. (**DEPRECATED - Use the `args` input in create operation instead**)
- use_external_resource:
- type: boolean
- default: false
- description: >-
- a boolean for setting whether to create the resource or use an existing one.
- See the using existing resources section.
- create_if_missing:
- default: false
- type: boolean
- description: >-
- If use_external_resource is ``true`` and the resource is missing,
- create it instead of failing.
- resource_id:
-        default: ''
- type: string
- description: >-
- name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
- device_name:
- default: auto
- type: string
- description: >-
- The device name this volume will be attached to.
- Default value is *auto*,
- which means multivim will auto-assign a device.
- Note that if you do explicitly set a value,
- this value may not be the actual device name assigned.
-          Sometimes the requested device will not be available and multivim will assign the volume to a different device;
-          this is why we recommend using *auto*.
- multivim_config:
- type: onap.multivim.datatypes.Config
- required: false
- boot:
- type: boolean
- default: false
- description: >-
- If a Server instance is connected to this Volume by a relationship,
- this volume will be used as the boot volume for that Server.
- interfaces:
- Standard:
- create:
- implementation: onap-multivim-plugin > cinder_plugin.volume.create
- inputs:
- args:
- default: {}
- type: onap.multivim.datatypes.Volume
- required: false
-
- status_attempts:
- description: >-
- Number of times to check for the creation's status before failing
- type: integer
- default: 20
- status_timeout:
- description: >-
- Interval (in seconds) between subsequent inquiries of the creation's
- status
- type: integer
- default: 15
- delete: onap-multivim-plugin > cinder_plugin.volume.delete
-
- Validation:
- type: onap.multivim.interfaces.validation
- creation: onap-multivim-plugin > cinder_plugin.volume.creation_validation
-
- requirements:
- - server:
- capability: tosca.capabilities.Node
- node: onap.multivim.nodes.Server
- relationship: onap.multivim.volume_attached_to_server
-
- onap.multivim.nodes.Image:
- derived_from: tosca.nodes.Root
- properties:
- image:
- description: >-
-          Required parameters are (container_format, disk_format). Accepted
-          formats are listed at
-          http://docs.multivim.org/developer/glance/formats.html.
-          To create an image from a local file, add its path in the data
-          parameter.
- default: {}
- type: map
- entry_schema: string
- image_url:
- default: ''
- type: string
- description: >-
- The multivim resource URL for the image.
- use_external_resource:
- default: false
- type: boolean
- description: >-
- a boolean for setting whether to create the resource or use an existing one.
- See the using existing resources section.
- create_if_missing:
- default: false
- type: boolean
- description: >-
- If use_external_resource is ``true`` and the resource is missing,
- create it instead of failing.
- resource_id:
- default: ''
- type: string
- description: >-
- name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
- multivim_config:
- type: onap.multivim.datatypes.Config
- required: false
- interfaces:
- Standard:
- create: onap-multivim-plugin > glance_plugin.image.create
-
- start:
- implementation: onap-multivim-plugin > glance_plugin.image.start
- inputs:
- start_retry_interval:
- default: 30
- type: integer
-
- delete: onap-multivim-plugin > glance_plugin.image.delete
-
- Validation:
- type: onap.multivim.interfaces.validation
- creation: onap-multivim-plugin > glance_plugin.image.creation_validation
-
- onap.multivim.nodes.Project:
- derived_from: tosca.nodes.Root
- properties:
- project:
- default: {}
- type: onap.multivim.datatypes.Project
- description: >-
- key-value project configuration.
- users:
- default: []
- type: list
- entry_schema: string
- description: >-
- List of users assigned to this project in the following format:
- { name: string, roles: [string] }
- quota:
- default: {}
- type: map
- entry_schema: string
- description: |
-          A dictionary mapping service names to quota definitions for a project
-
- e.g::
-
- quota:
- neutron: <quota>
- nova: <quota>
- use_external_resource:
- default: false
- type: boolean
- description: >-
- a boolean for setting whether to create the resource or use an existing one.
- See the using existing resources section.
- create_if_missing:
- default: false
- type: boolean
- description: >-
- If use_external_resource is ``true`` and the resource is missing,
- create it instead of failing.
- resource_id:
- default: ''
- type: string
- description: >-
- name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string).
- multivim_config:
- type: onap.multivim.datatypes.Config
- required: false
- interfaces:
- Standard:
- create: multivim.keystone_plugin.project.create
- start: multivim.keystone_plugin.project.start
- delete: multivim.keystone_plugin.project.delete
- Validation:
- type: onap.multivim.interfaces.validation
- creation: multivim.keystone_plugin.project.creation_validation
-
-
-relationship_types:
-
- onap.multivim.port_connected_to_security_group:
- derived_from: ConnectsTo
- interfaces:
- Configure:
- add_source: onap-multivim-plugin > neutron_plugin.port.connect_security_group
-
- onap.multivim.subnet_connected_to_router:
- derived_from: ConnectsTo
- interfaces:
- Configure:
- add_target: onap-multivim-plugin > neutron_plugin.router.connect_subnet
- remove_target: onap-multivim-plugin > neutron_plugin.router.disconnect_subnet
-
- onap.multivim.server_connected_to_floating_ip:
- derived_from: ConnectsTo
- interfaces:
- Configure:
- add_source:
- implementation: onap-multivim-plugin > nova_plugin.server.connect_floatingip
- inputs:
- fixed_ip:
- description: >
- The fixed IP to be associated with the floating IP.
- If omitted, Openstack will choose which port to associate.
- type: string
- default: ''
- remove_source: onap-multivim-plugin > nova_plugin.server.disconnect_floatingip
-
- onap.multivim.port_connected_to_floating_ip:
- derived_from: ConnectsTo
- interfaces:
- Configure:
- add_source: onap-multivim-plugin > neutron_plugin.floatingip.connect_port
- remove_source: onap-multivim-plugin > neutron_plugin.floatingip.disconnect_port
-
- onap.multivim.server_connected_to_security_group:
- derived_from: ConnectsTo
- interfaces:
- Configure:
- add_source: onap-multivim-plugin > nova_plugin.server.connect_security_group
- remove_source: onap-multivim-plugin > nova_plugin.server.disconnect_security_group
-
- onap.multivim.server_connected_to_port:
- derived_from: ConnectsTo
- interfaces:
- Configure:
- remove_source: onap-multivim-plugin > neutron_plugin.port.detach
-
- onap.multivim.server_connected_to_keypair:
- derived_from: ConnectsTo
-
- onap.multivim.port_connected_to_subnet:
- derived_from: ConnectsTo
-
- onap.multivim.volume_attached_to_server:
- derived_from: ConnectsTo
- interfaces:
- Configure:
- add_target:
- implementation: onap-multivim-plugin > nova_plugin.server.attach_volume
- inputs:
-
- status_attempts:
- description: >
- Number of times to check for the attachment's status before failing
- type: integer
- default: 10
- status_timeout:
- description: >
- Interval (in seconds) between subsequent inquiries of the attachment's
- status
- type: integer
- default: 2
- remove_target:
- implementation: onap-multivim-plugin > nova_plugin.server.detach_volume
- inputs:
-
- status_attempts:
- description: >
- Number of times to check for the detachment's status before failing
- type: integer
- default: 10
- status_timeout:
- description: >
- Interval (in seconds) between subsequent inquiries of the detachment's
- status
- type: integer
- default: 2
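
Note: the volume_attached_to_server relationship above exposes status_attempts and status_timeout inputs for both attach and detach. These follow the usual poll-until-ready pattern: check the attachment status up to status_attempts times, sleeping status_timeout seconds between checks. A minimal, hypothetical Python sketch of that pattern (function and status names are illustrative, not taken from the plugin):

import time

def wait_for_status(get_status, expected, status_attempts=10, status_timeout=2):
    # Poll get_status() until it returns the expected value, checking at most
    # status_attempts times and sleeping status_timeout seconds between checks.
    for _ in range(status_attempts):
        if get_status() == expected:
            return
        time.sleep(status_timeout)
    raise RuntimeError('resource did not reach status %r after %d attempts'
                       % (expected, status_attempts))
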
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/readthedocs.yml b/aria/multivim-plugin/src/main/python/multivim-plugin/readthedocs.yml
deleted file mode 100644
index af59f269aa..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/readthedocs.yml
+++ /dev/null
@@ -1 +0,0 @@
-requirements_file: docs/requirements.txt
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/setup.py b/aria/multivim-plugin/src/main/python/multivim-plugin/setup.py
deleted file mode 100644
index 51387c098d..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/setup.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-from setuptools import setup
-
-
-setup(
- zip_safe=True,
- name='onap-multivim-plugin',
- version='2.2.0',
- author='idanmo',
- author_email='idan@gigaspaces.com',
- packages=[
- 'openstack_plugin_common',
- 'nova_plugin',
- 'neutron_plugin',
- 'cinder_plugin',
- 'glance_plugin',
- 'keystone_plugin'
- ],
- license='LICENSE',
- description='ONAP plugin for multivim infrastructure.',
- install_requires=[
- 'cloudify-plugins-common>=3.3.1',
- 'keystoneauth1>=2.16.0,<3',
- 'python-novaclient==7.0.0',
- 'python-keystoneclient==3.5.0',
- 'python-neutronclient==6.0.0',
- 'python-cinderclient==1.9.0',
- 'python-glanceclient==2.5.0',
- 'IPy==0.81'
- ]
-)
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/__init__.py b/aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/__init__.py
deleted file mode 100644
index 3ad9513f40..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from pkgutil import extend_path
-__path__ = extend_path(__path__, __name__)
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/openstack_handler.py b/aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/openstack_handler.py
deleted file mode 100644
index 76368fa10a..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/openstack_handler.py
+++ /dev/null
@@ -1,657 +0,0 @@
-########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import random
-import logging
-import os
-import time
-import copy
-from contextlib import contextmanager
-
-from cinderclient import client as cinderclient
-from keystoneauth1 import loading, session
-import novaclient.client as nvclient
-import neutronclient.v2_0.client as neclient
-from retrying import retry
-
-from cosmo_tester.framework.handlers import (
- BaseHandler,
- BaseCloudifyInputsConfigReader)
-from cosmo_tester.framework.util import get_actual_keypath
-
-logging.getLogger('neutronclient.client').setLevel(logging.INFO)
-logging.getLogger('novaclient.client').setLevel(logging.INFO)
-
-
-VOLUME_TERMINATION_TIMEOUT_SECS = 300
-
-
-class OpenstackCleanupContext(BaseHandler.CleanupContext):
-
- def __init__(self, context_name, env):
- super(OpenstackCleanupContext, self).__init__(context_name, env)
- self.before_run = self.env.handler.openstack_infra_state()
-
- def cleanup(self):
- """
- Cleans resources created by the test.
-        Resources that existed before the test will not be removed
- """
- super(OpenstackCleanupContext, self).cleanup()
- resources_to_teardown = self.get_resources_to_teardown(
- self.env, resources_to_keep=self.before_run)
- if self.skip_cleanup:
- self.logger.warn('[{0}] SKIPPING cleanup of resources: {1}'
- .format(self.context_name, resources_to_teardown))
- else:
- self._clean(self.env, resources_to_teardown)
-
- @classmethod
- def clean_all(cls, env):
- """
- Cleans *all* resources, including resources that were not
- created by the test
- """
- super(OpenstackCleanupContext, cls).clean_all(env)
- resources_to_teardown = cls.get_resources_to_teardown(env)
- cls._clean(env, resources_to_teardown)
-
- @classmethod
- def _clean(cls, env, resources_to_teardown):
- cls.logger.info('Openstack handler will try to remove these resources:'
- ' {0}'.format(resources_to_teardown))
- failed_to_remove = env.handler.remove_openstack_resources(
- resources_to_teardown)
- if failed_to_remove:
- trimmed_failed_to_remove = {key: value for key, value in
- failed_to_remove.iteritems()
- if value}
- if len(trimmed_failed_to_remove) > 0:
- msg = 'Openstack handler failed to remove some resources:' \
- ' {0}'.format(trimmed_failed_to_remove)
- cls.logger.error(msg)
- raise RuntimeError(msg)
-
- @classmethod
- def get_resources_to_teardown(cls, env, resources_to_keep=None):
- all_existing_resources = env.handler.openstack_infra_state()
- if resources_to_keep:
- return env.handler.openstack_infra_state_delta(
- before=resources_to_keep, after=all_existing_resources)
- else:
- return all_existing_resources
-
- def update_server_id(self, server_name):
-
- # retrieve the id of the new server
- nova, _, _ = self.env.handler.openstack_clients()
- servers = nova.servers.list(
- search_opts={'name': server_name})
- if len(servers) > 1:
- raise RuntimeError(
- 'Expected 1 server with name {0}, but found {1}'
- .format(server_name, len(servers)))
-
- new_server_id = servers[0].id
-
- # retrieve the id of the old server
- old_server_id = None
- servers = self.before_run['servers']
- for server_id, name in servers.iteritems():
- if server_name == name:
- old_server_id = server_id
- break
- if old_server_id is None:
- raise RuntimeError(
- 'Could not find a server with name {0} '
- 'in the internal cleanup context state'
- .format(server_name))
-
- # replace the id in the internal state
- servers[new_server_id] = servers.pop(old_server_id)
-
-
-class CloudifyOpenstackInputsConfigReader(BaseCloudifyInputsConfigReader):
-
- def __init__(self, cloudify_config, manager_blueprint_path, **kwargs):
- super(CloudifyOpenstackInputsConfigReader, self).__init__(
- cloudify_config, manager_blueprint_path=manager_blueprint_path,
- **kwargs)
-
- @property
- def region(self):
- return self.config['region']
-
- @property
- def management_server_name(self):
- return self.config['manager_server_name']
-
- @property
- def agent_key_path(self):
- return self.config['agent_private_key_path']
-
- @property
- def management_user_name(self):
- return self.config['ssh_user']
-
- @property
- def management_key_path(self):
- return self.config['ssh_key_filename']
-
- @property
- def agent_keypair_name(self):
- return self.config['agent_public_key_name']
-
- @property
- def management_keypair_name(self):
- return self.config['manager_public_key_name']
-
- @property
- def use_existing_agent_keypair(self):
- return self.config['use_existing_agent_keypair']
-
- @property
- def use_existing_manager_keypair(self):
- return self.config['use_existing_manager_keypair']
-
- @property
- def external_network_name(self):
- return self.config['external_network_name']
-
- @property
- def keystone_username(self):
- return self.config['keystone_username']
-
- @property
- def keystone_password(self):
- return self.config['keystone_password']
-
- @property
- def keystone_tenant_name(self):
- return self.config['keystone_tenant_name']
-
- @property
- def keystone_url(self):
- return self.config['keystone_url']
-
- @property
- def neutron_url(self):
- return self.config.get('neutron_url', None)
-
- @property
- def management_network_name(self):
- return self.config['management_network_name']
-
- @property
- def management_subnet_name(self):
- return self.config['management_subnet_name']
-
- @property
- def management_router_name(self):
- return self.config['management_router']
-
- @property
- def agents_security_group(self):
- return self.config['agents_security_group_name']
-
- @property
- def management_security_group(self):
- return self.config['manager_security_group_name']
-
-
-class OpenstackHandler(BaseHandler):
-
- CleanupContext = OpenstackCleanupContext
- CloudifyConfigReader = CloudifyOpenstackInputsConfigReader
-
- def before_bootstrap(self):
- super(OpenstackHandler, self).before_bootstrap()
- with self.update_cloudify_config() as patch:
- suffix = '-%06x' % random.randrange(16 ** 6)
- server_name_prop_path = 'manager_server_name'
- patch.append_value(server_name_prop_path, suffix)
-
- def after_bootstrap(self, provider_context):
- super(OpenstackHandler, self).after_bootstrap(provider_context)
- resources = provider_context['resources']
- agent_keypair = resources['agents_keypair']
- management_keypair = resources['management_keypair']
- self.remove_agent_keypair = agent_keypair['external_resource'] is False
- self.remove_management_keypair = \
- management_keypair['external_resource'] is False
-
- def after_teardown(self):
- super(OpenstackHandler, self).after_teardown()
- if self.remove_agent_keypair:
- agent_key_path = get_actual_keypath(self.env,
- self.env.agent_key_path,
- raise_on_missing=False)
- if agent_key_path:
- os.remove(agent_key_path)
- if self.remove_management_keypair:
- management_key_path = get_actual_keypath(
- self.env,
- self.env.management_key_path,
- raise_on_missing=False)
- if management_key_path:
- os.remove(management_key_path)
-
- def openstack_clients(self):
- creds = self._client_creds()
- params = {
- 'region_name': creds.pop('region_name'),
- }
-
- loader = loading.get_plugin_loader("password")
- auth = loader.load_from_options(**creds)
- sess = session.Session(auth=auth, verify=True)
-
- params['session'] = sess
-
- nova = nvclient.Client('2', **params)
- neutron = neclient.Client(**params)
- cinder = cinderclient.Client('2', **params)
-
- return (nova, neutron, cinder)
-
- @retry(stop_max_attempt_number=5, wait_fixed=20000)
- def openstack_infra_state(self):
- """
-        @retry decorator is used because this error sometimes occurs:
- ConnectionFailed: Connection to neutron failed: Maximum
- attempts reached
- """
- nova, neutron, cinder = self.openstack_clients()
- try:
- prefix = self.env.resources_prefix
- except (AttributeError, KeyError):
- prefix = ''
- return {
- 'networks': dict(self._networks(neutron, prefix)),
- 'subnets': dict(self._subnets(neutron, prefix)),
- 'routers': dict(self._routers(neutron, prefix)),
- 'security_groups': dict(self._security_groups(neutron, prefix)),
- 'servers': dict(self._servers(nova, prefix)),
- 'key_pairs': dict(self._key_pairs(nova, prefix)),
- 'floatingips': dict(self._floatingips(neutron, prefix)),
- 'ports': dict(self._ports(neutron, prefix)),
- 'volumes': dict(self._volumes(cinder, prefix))
- }
-
- def openstack_infra_state_delta(self, before, after):
- after = copy.deepcopy(after)
- return {
- prop: self._remove_keys(after[prop], before[prop].keys())
- for prop in before
- }
-
- def _find_keypairs_to_delete(self, nodes, node_instances):
- """Filter the nodes only returning the names of keypair nodes
-
- Examine node_instances and nodes, return the external_name of
- those node_instances, which correspond to a node that has a
- type == KeyPair
-
- To filter by deployment_id, simply make sure that the nodes and
-        node_instances this method receives are pre-filtered
-        (i.e. filter the nodes while fetching them from the manager)
- """
- keypairs = set() # a set of (deployment_id, node_id) tuples
-
- for node in nodes:
- if node.get('type') != 'cloudify.openstack.nodes.KeyPair':
- continue
-            # deployment_id isn't always present in local_env runs
- key = (node.get('deployment_id'), node['id'])
- keypairs.add(key)
-
- for node_instance in node_instances:
- key = (node_instance.get('deployment_id'),
- node_instance['node_id'])
- if key not in keypairs:
- continue
-
- runtime_properties = node_instance['runtime_properties']
- if not runtime_properties:
- continue
- name = runtime_properties.get('external_name')
- if name:
- yield name
-
- def _delete_keypairs_by_name(self, keypair_names):
- nova, neutron, cinder = self.openstack_clients()
- existing_keypairs = nova.keypairs.list()
-
- for name in keypair_names:
- for keypair in existing_keypairs:
- if keypair.name == name:
- nova.keypairs.delete(keypair)
-
- def remove_keypairs_from_local_env(self, local_env):
- """Query the local_env for nodes which are keypairs, remove them
-
- Similar to querying the manager, we can look up nodes in the local_env
- which is used for tests.
- """
- nodes = local_env.storage.get_nodes()
- node_instances = local_env.storage.get_node_instances()
- names = self._find_keypairs_to_delete(nodes, node_instances)
- self._delete_keypairs_by_name(names)
-
- def remove_keypairs_from_manager(self, deployment_id=None,
- rest_client=None):
- """Query the manager for nodes by deployment_id, delete keypairs
-
- Fetch nodes and node_instances from the manager by deployment_id
- (or all if not given), find which ones represent openstack keypairs,
- remove them.
- """
- if rest_client is None:
- rest_client = self.env.rest_client
-
- nodes = rest_client.nodes.list(deployment_id=deployment_id)
- node_instances = rest_client.node_instances.list(
- deployment_id=deployment_id)
- keypairs = self._find_keypairs_to_delete(nodes, node_instances)
- self._delete_keypairs_by_name(keypairs)
-
- def remove_keypair(self, name):
-        """Delete an openstack keypair by name. If it doesn't exist, do nothing.
- """
- self._delete_keypairs_by_name([name])
-
- def remove_openstack_resources(self, resources_to_remove):
-        # This is essentially a workaround: if we remove resources in the
-        # wrong order the first time, a later pass has a chance to do better.
-        # A third pass can't really hurt, can it?
-        # Third time's the charm.
- for _ in range(3):
- resources_to_remove = self._remove_openstack_resources_impl(
- resources_to_remove)
- if all([len(g) == 0 for g in resources_to_remove.values()]):
- break
- # give openstack some time to update its data structures
- time.sleep(3)
- return resources_to_remove
-
- def _remove_openstack_resources_impl(self, resources_to_remove):
- nova, neutron, cinder = self.openstack_clients()
-
- servers = nova.servers.list()
- ports = neutron.list_ports()['ports']
- routers = neutron.list_routers()['routers']
- subnets = neutron.list_subnets()['subnets']
- networks = neutron.list_networks()['networks']
- # keypairs = nova.keypairs.list()
- floatingips = neutron.list_floatingips()['floatingips']
- security_groups = neutron.list_security_groups()['security_groups']
- volumes = cinder.volumes.list()
-
- failed = {
- 'servers': {},
- 'routers': {},
- 'ports': {},
- 'subnets': {},
- 'networks': {},
- 'key_pairs': {},
- 'floatingips': {},
- 'security_groups': {},
- 'volumes': {}
- }
-
- volumes_to_remove = []
- for volume in volumes:
- if volume.id in resources_to_remove['volumes']:
- volumes_to_remove.append(volume)
-
- left_volumes = self._delete_volumes(nova, cinder, volumes_to_remove)
- for volume_id, ex in left_volumes.iteritems():
- failed['volumes'][volume_id] = ex
-
- for server in servers:
- if server.id in resources_to_remove['servers']:
- with self._handled_exception(server.id, failed, 'servers'):
- nova.servers.delete(server)
-
- for router in routers:
- if router['id'] in resources_to_remove['routers']:
- with self._handled_exception(router['id'], failed, 'routers'):
- for p in neutron.list_ports(
- device_id=router['id'])['ports']:
- neutron.remove_interface_router(router['id'], {
- 'port_id': p['id']
- })
- neutron.delete_router(router['id'])
-
- for port in ports:
- if port['id'] in resources_to_remove['ports']:
- with self._handled_exception(port['id'], failed, 'ports'):
- neutron.delete_port(port['id'])
-
- for subnet in subnets:
- if subnet['id'] in resources_to_remove['subnets']:
- with self._handled_exception(subnet['id'], failed, 'subnets'):
- neutron.delete_subnet(subnet['id'])
-
- for network in networks:
- if network['name'] == self.env.external_network_name:
- continue
- if network['id'] in resources_to_remove['networks']:
- with self._handled_exception(network['id'], failed,
- 'networks'):
- neutron.delete_network(network['id'])
-
- # TODO: implement key-pair creation and cleanup per tenant
- #
- # IMPORTANT: Do not remove key-pairs, they might be used
- # by another tenant (of the same user)
- #
- # for key_pair in keypairs:
- # if key_pair.name == self.env.agent_keypair_name and \
- # self.env.use_existing_agent_keypair:
- # # this is a pre-existing agent key-pair, do not remove
- # continue
- # elif key_pair.name == self.env.management_keypair_name and \
- # self.env.use_existing_manager_keypair:
- # # this is a pre-existing manager key-pair, do not remove
- # continue
- # elif key_pair.id in resources_to_remove['key_pairs']:
- # with self._handled_exception(key_pair.id, failed,
- # 'key_pairs'):
- # nova.keypairs.delete(key_pair)
-
- for floatingip in floatingips:
- if floatingip['id'] in resources_to_remove['floatingips']:
- with self._handled_exception(floatingip['id'], failed,
- 'floatingips'):
- neutron.delete_floatingip(floatingip['id'])
-
- for security_group in security_groups:
- if security_group['name'] == 'default':
- continue
- if security_group['id'] in resources_to_remove['security_groups']:
- with self._handled_exception(security_group['id'],
- failed, 'security_groups'):
- neutron.delete_security_group(security_group['id'])
-
- return failed
-
- def _delete_volumes(self, nova, cinder, existing_volumes):
- unremovables = {}
- end_time = time.time() + VOLUME_TERMINATION_TIMEOUT_SECS
-
- for volume in existing_volumes:
- # detach the volume
- if volume.status in ['available', 'error', 'in-use']:
- try:
- self.logger.info('Detaching volume {0} ({1}), currently in'
- ' status {2} ...'.
- format(volume.name, volume.id,
- volume.status))
- for attachment in volume.attachments:
- nova.volumes.delete_server_volume(
- server_id=attachment['server_id'],
- attachment_id=attachment['id'])
- except Exception as e:
- self.logger.warning('Attempt to detach volume {0} ({1})'
- ' yielded exception: "{2}"'.
- format(volume.name, volume.id,
- e))
- unremovables[volume.id] = e
- existing_volumes.remove(volume)
-
- time.sleep(3)
- for volume in existing_volumes:
- # delete the volume
- if volume.status in ['available', 'error', 'in-use']:
- try:
- self.logger.info('Deleting volume {0} ({1}), currently in'
- ' status {2} ...'.
- format(volume.name, volume.id,
- volume.status))
- cinder.volumes.delete(volume)
- except Exception as e:
- self.logger.warning('Attempt to delete volume {0} ({1})'
- ' yielded exception: "{2}"'.
- format(volume.name, volume.id,
- e))
- unremovables[volume.id] = e
- existing_volumes.remove(volume)
-
-        # wait for all volume deletions to complete or until the timeout is reached
- while existing_volumes and time.time() < end_time:
- time.sleep(3)
- for volume in existing_volumes:
- volume_id = volume.id
- volume_name = volume.name
- try:
- vol = cinder.volumes.get(volume_id)
- if vol.status == 'deleting':
- self.logger.debug('volume {0} ({1}) is being '
- 'deleted...'.format(volume_name,
- volume_id))
- else:
- self.logger.warning('volume {0} ({1}) is in '
- 'unexpected status: {2}'.
- format(volume_name, volume_id,
- vol.status))
- except Exception as e:
- # the volume wasn't found, it was deleted
- if hasattr(e, 'code') and e.code == 404:
- self.logger.info('deleted volume {0} ({1})'.
- format(volume_name, volume_id))
- existing_volumes.remove(volume)
- else:
- self.logger.warning('failed to remove volume {0} '
- '({1}), exception: {2}'.
- format(volume_name,
- volume_id, e))
- unremovables[volume_id] = e
- existing_volumes.remove(volume)
-
- if existing_volumes:
- for volume in existing_volumes:
- # try to get the volume's status
- try:
- vol = cinder.volumes.get(volume.id)
- vol_status = vol.status
- except:
- # failed to get volume... status is unknown
- vol_status = 'unknown'
-
- unremovables[volume.id] = 'timed out while removing volume '\
- '{0} ({1}), current volume status '\
- 'is {2}'.format(volume.name,
- volume.id,
- vol_status)
-
- if unremovables:
- self.logger.warning('failed to remove volumes: {0}'.format(
- unremovables))
-
- return unremovables
-
- def _client_creds(self):
- return {
- 'username': self.env.keystone_username,
- 'password': self.env.keystone_password,
- 'auth_url': self.env.keystone_url,
- 'project_name': self.env.keystone_tenant_name,
- 'region_name': self.env.region
- }
-
- def _networks(self, neutron, prefix):
- return [(n['id'], n['name'])
- for n in neutron.list_networks()['networks']
- if self._check_prefix(n['name'], prefix)]
-
- def _subnets(self, neutron, prefix):
- return [(n['id'], n['name'])
- for n in neutron.list_subnets()['subnets']
- if self._check_prefix(n['name'], prefix)]
-
- def _routers(self, neutron, prefix):
- return [(n['id'], n['name'])
- for n in neutron.list_routers()['routers']
- if self._check_prefix(n['name'], prefix)]
-
- def _security_groups(self, neutron, prefix):
- return [(n['id'], n['name'])
- for n in neutron.list_security_groups()['security_groups']
- if self._check_prefix(n['name'], prefix)]
-
- def _servers(self, nova, prefix):
- return [(s.id, s.human_id)
- for s in nova.servers.list()
- if self._check_prefix(s.human_id, prefix)]
-
- def _key_pairs(self, nova, prefix):
- return [(kp.id, kp.name)
- for kp in nova.keypairs.list()
- if self._check_prefix(kp.name, prefix)]
-
- def _floatingips(self, neutron, prefix):
- return [(ip['id'], ip['floating_ip_address'])
- for ip in neutron.list_floatingips()['floatingips']]
-
- def _ports(self, neutron, prefix):
- return [(p['id'], p['name'])
- for p in neutron.list_ports()['ports']
- if self._check_prefix(p['name'], prefix)]
-
- def _volumes(self, cinder, prefix):
- return [(v.id, v.name) for v in cinder.volumes.list()
- if self._check_prefix(v.name, prefix)]
-
- def _check_prefix(self, name, prefix):
- # some openstack resources (eg. volumes) can have no display_name,
- # in which case it's None
- return name is None or name.startswith(prefix)
-
- def _remove_keys(self, dct, keys):
- for key in keys:
- if key in dct:
- del dct[key]
- return dct
-
- @contextmanager
- def _handled_exception(self, resource_id, failed, resource_group):
- try:
- yield
- except BaseException, ex:
- failed[resource_group][resource_id] = ex
-
-
-handler = OpenstackHandler
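
The cleanup context in the handler above snapshots the OpenStack infrastructure state before a test runs and later tears down only the resources that appeared afterwards (openstack_infra_state_delta). A small Python 3 sketch of that before/after delta idea, using illustrative data only:

def infra_state_delta(before, after):
    # For each resource group, keep only the entries that were not present
    # in the "before" snapshot; pre-existing resources are never touched.
    return {
        group: {rid: name for rid, name in after.get(group, {}).items()
                if rid not in before.get(group, {})}
        for group in before
    }

before = {'servers': {'id-1': 'pre-existing'}}
after = {'servers': {'id-1': 'pre-existing', 'id-2': 'created-by-test'}}
assert infra_state_delta(before, after) == {'servers': {'id-2': 'created-by-test'}}
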
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/openstack_nova_net_handler.py b/aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/openstack_nova_net_handler.py
deleted file mode 100644
index 06fa0ab4d0..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/system_tests/openstack_nova_net_handler.py
+++ /dev/null
@@ -1,98 +0,0 @@
-########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-
-import novaclient.v2.client as nvclient
-
-from system_tests.openstack_handler import OpenstackHandler
-
-
-class OpenstackNovaNetHandler(OpenstackHandler):
-
-    # using the Config Readers of the regular OpenstackHandler - attempts
-    # to read neutron-related data may fail, but that shouldn't happen from
-    # nova-net tests in the first place
- # CloudifyConfigReader = None
-
- def openstack_clients(self):
- creds = self._client_creds()
- return nvclient.Client(**creds)
-
- def openstack_infra_state(self):
- nova = self.openstack_clients()
- prefix = self.env.resources_prefix
- return {
- 'security_groups': dict(self._security_groups(nova, prefix)),
- 'servers': dict(self._servers(nova, prefix)),
- 'key_pairs': dict(self._key_pairs(nova, prefix)),
- 'floatingips': dict(self._floatingips(nova, prefix)),
- }
-
- def _floatingips(self, nova, prefix):
- return [(ip.id, ip.ip)
- for ip in nova.floating_ips.list()]
-
- def _security_groups(self, nova, prefix):
- return [(n.id, n.name)
- for n in nova.security_groups.list()
- if self._check_prefix(n.name, prefix)]
-
- def _remove_openstack_resources_impl(self, resources_to_remove):
- nova = self.openstack_clients()
-
- servers = nova.servers.list()
- keypairs = nova.keypairs.list()
- floatingips = nova.floating_ips.list()
- security_groups = nova.security_groups.list()
-
- failed = {
- 'servers': {},
- 'key_pairs': {},
- 'floatingips': {},
- 'security_groups': {}
- }
-
- for server in servers:
- if server.id in resources_to_remove['servers']:
- with self._handled_exception(server.id, failed, 'servers'):
- nova.servers.delete(server)
- for key_pair in keypairs:
- if key_pair.name == self.env.agent_keypair_name and \
- self.env.use_existing_agent_keypair:
- # this is a pre-existing agent key-pair, do not remove
- continue
- elif key_pair.name == self.env.management_keypair_name and \
- self.env.use_existing_manager_keypair:
- # this is a pre-existing manager key-pair, do not remove
- continue
- elif key_pair.id in resources_to_remove['key_pairs']:
- with self._handled_exception(key_pair.id, failed, 'key_pairs'):
- nova.keypairs.delete(key_pair)
- for floatingip in floatingips:
- if floatingip.id in resources_to_remove['floatingips']:
- with self._handled_exception(floatingip.id, failed,
- 'floatingips'):
- nova.floating_ips.delete(floatingip)
- for security_group in security_groups:
- if security_group.name == 'default':
- continue
- if security_group.id in resources_to_remove['security_groups']:
- with self._handled_exception(security_group.id, failed,
- 'security_groups'):
- nova.security_groups.delete(security_group)
-
- return failed
-
-
-handler = OpenstackNovaNetHandler
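
Both handlers sweep resources group by group and collect per-resource failures through the _handled_exception context manager instead of aborting on the first error. A rough Python 3 equivalent of that aggregation pattern (simplified, with illustrative names):

from contextlib import contextmanager

@contextmanager
def handled_exception(resource_id, failed, resource_group):
    # Record any exception under failed[resource_group][resource_id]
    # and keep going, so one stubborn resource does not stop the sweep.
    try:
        yield
    except Exception as ex:
        failed[resource_group][resource_id] = ex

failed = {'servers': {}}
for server_id in ('a', 'b'):
    with handled_exception(server_id, failed, 'servers'):
        if server_id == 'b':
            raise RuntimeError('delete failed')
# failed is now {'servers': {'b': RuntimeError('delete failed')}}
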
diff --git a/aria/multivim-plugin/src/main/python/multivim-plugin/tox.ini b/aria/multivim-plugin/src/main/python/multivim-plugin/tox.ini
deleted file mode 100644
index b3572d70d2..0000000000
--- a/aria/multivim-plugin/src/main/python/multivim-plugin/tox.ini
+++ /dev/null
@@ -1,44 +0,0 @@
-# content of: tox.ini , put in same dir as setup.py
-[tox]
-envlist=flake8,docs,py27
-
-[testenv]
-deps =
- -rdev-requirements.txt
-
-[testenv:py27]
-deps =
- coverage==3.7.1
- nose
- nose-cov
- mock
- testfixtures
- {[testenv]deps}
-commands =
- nosetests --with-cov --cov-report term-missing \
- --cov cinder_plugin cinder_plugin/tests \
- --cov glance_plugin glance_plugin/tests \
- --cov keystone_plugin keystone_plugin/tests \
- --cov neutron_plugin \
- neutron_plugin/tests/test_port.py neutron_plugin/tests/test_security_group.py \
- --cov nova_plugin nova_plugin/tests \
- --cov openstack_plugin_common openstack_plugin_common/tests
-
-[testenv:docs]
-changedir=docs
-deps =
- git+https://github.com/cloudify-cosmo/sphinxify.git@initial-work
-commands =
- sphinx-build -W -b html -d {envtmpdir}/doctrees . {envtmpdir}/html
-
-[testenv:flake8]
-deps =
- flake8
- {[testenv]deps}
-commands =
- flake8 cinder_plugin
- flake8 neutron_plugin
- flake8 nova_plugin
- flake8 openstack_plugin_common
- flake8 glance_plugin
- flake8 keystone_plugin
diff --git a/bpmn/MSOCoreBPMN/src/main/java/org/openecomp/mso/bpmn/core/PropertyConfiguration.java b/bpmn/MSOCoreBPMN/src/main/java/org/openecomp/mso/bpmn/core/PropertyConfiguration.java
index b6272057a5..f75722ecc5 100644
--- a/bpmn/MSOCoreBPMN/src/main/java/org/openecomp/mso/bpmn/core/PropertyConfiguration.java
+++ b/bpmn/MSOCoreBPMN/src/main/java/org/openecomp/mso/bpmn/core/PropertyConfiguration.java
@@ -115,6 +115,10 @@ public class PropertyConfiguration {
return PropertyConfigurationInstanceHolder.instance;
}
+ static void resetPropertyConfigurationSingletonInstance(){
+ PropertyConfigurationInstanceHolder.instance = new PropertyConfiguration();
+ }
+
/**
* Returns the list of supported files.
*/
diff --git a/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/PropertyConfigurationTest.java b/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/PropertyConfigurationTest.java
index 506dba2552..57a512891f 100644
--- a/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/PropertyConfigurationTest.java
+++ b/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/PropertyConfigurationTest.java
@@ -51,6 +51,8 @@ import org.junit.Test;
public class PropertyConfigurationTest {
@Before
public void beforeTest() throws IOException {
+ PropertyConfiguration.resetPropertyConfigurationSingletonInstance();
+
Map<String, String> defaultProperties = PropertyConfigurationSetup.createBpmnProperties();
defaultProperties.put("testValue", "testKey");
PropertyConfigurationSetup.init(defaultProperties);
diff --git a/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/TestBaseTask.java b/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/TestBaseTask.java
index d434ac702d..1346fde674 100644
--- a/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/TestBaseTask.java
+++ b/bpmn/MSOCoreBPMN/src/test/java/org/openecomp/mso/bpmn/core/TestBaseTask.java
@@ -32,6 +32,7 @@ import org.camunda.bpm.engine.test.Deployment;
import org.camunda.bpm.engine.test.ProcessEngineRule;
import org.junit.Assert;
import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
@@ -45,7 +46,13 @@ public class TestBaseTask {
@Rule
public ProcessEngineRule processEngineRule = new ProcessEngineRule();
-
+
+ @BeforeClass
+ public static void setUpClass() {
+ System.setProperty("mso.config.path", "src/test/resources");
+ PropertyConfiguration.resetPropertyConfigurationSingletonInstance();
+ }
+
@Before
public void beforeTest() throws Exception {
CamundaDBSetup.configure();
diff --git a/bpmn/MSOInfrastructureBPMN/src/main/groovy/org/openecomp/mso/bpmn/infrastructure/scripts/CreateActivateSDNCResource.groovy b/bpmn/MSOInfrastructureBPMN/src/main/groovy/org/openecomp/mso/bpmn/infrastructure/scripts/CreateActivateSDNCResource.groovy
new file mode 100644
index 0000000000..e5f52a7406
--- /dev/null
+++ b/bpmn/MSOInfrastructureBPMN/src/main/groovy/org/openecomp/mso/bpmn/infrastructure/scripts/CreateActivateSDNCResource.groovy
@@ -0,0 +1,425 @@
+/*-
+ * ============LICENSE_START=======================================================
+ * OPENECOMP - SO
+ * ================================================================================
+ * Copyright (C) 2018 Huawei Technologies Co., Ltd. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.openecomp.mso.bpmn.infrastructure.scripts
+
+import org.json.JSONObject
+import org.json.XML;
+
+import static org.apache.commons.lang3.StringUtils.*;
+import groovy.xml.XmlUtil
+import groovy.json.*
+import org.openecomp.mso.bpmn.common.scripts.AbstractServiceTaskProcessor
+import org.openecomp.mso.bpmn.common.scripts.ExceptionUtil
+import org.openecomp.mso.bpmn.common.recipe.ResourceInput;
+import org.openecomp.mso.bpmn.common.resource.ResourceRequestBuilder
+import org.openecomp.mso.bpmn.core.WorkflowException
+import org.openecomp.mso.bpmn.core.json.JsonUtils
+import org.openecomp.mso.bpmn.infrastructure.workflow.serviceTask.client.builder.AbstractBuilder
+import org.openecomp.mso.rest.APIResponse
+import org.openecomp.mso.bpmn.common.scripts.SDNCAdapterUtils
+import org.openecomp.mso.bpmn.infrastructure.workflow.service.ServicePluginFactory
+import java.util.UUID;
+
+import org.camunda.bpm.engine.runtime.Execution
+import org.camunda.bpm.engine.delegate.BpmnError
+import org.camunda.bpm.engine.delegate.DelegateExecution
+import org.apache.commons.lang3.*
+import org.apache.commons.codec.binary.Base64;
+import org.springframework.web.util.UriUtils
+import org.openecomp.mso.rest.RESTClient
+import org.openecomp.mso.rest.RESTConfig
+
+/**
+ * This groovy class supports the <class>CreateActivateSDNCResource.bpmn</class> process.
+ * flow for SDNC Network Resource Create
+ */
+public class CreateActivateSDNCResource extends AbstractServiceTaskProcessor {
+
+ String Prefix="CRESDNCRES_"
+
+ ExceptionUtil exceptionUtil = new ExceptionUtil()
+
+ JsonUtils jsonUtil = new JsonUtils()
+
+ SDNCAdapterUtils sdncAdapterUtils = new SDNCAdapterUtils()
+
+ public void preProcessRequest(DelegateExecution execution){
+ def isDebugEnabled = execution.getVariable("isDebugLogEnabled")
+ utils.log("INFO"," ***** Started preProcessRequest *****", isDebugEnabled)
+ try {
+
+ //get bpmn inputs from resource request.
+ String requestId = execution.getVariable("mso-request-id")
+ String requestAction = execution.getVariable("requestAction")
+ utils.log("INFO","The requestAction is: " + requestAction, isDebugEnabled)
+ String recipeParamsFromRequest = execution.getVariable("recipeParams")
+ utils.log("INFO","The recipeParams is: " + recipeParamsFromRequest, isDebugEnabled)
+ String resourceInput = execution.getVariable("resourceInput")
+ utils.log("INFO","The resourceInput is: " + resourceInput, isDebugEnabled)
+ //Get ResourceInput Object
+ ResourceInput resourceInputObj = ResourceRequestBuilder.getJsonObject(resourceInput, ResourceInput.class)
+ execution.setVariable(Prefix + "resourceInput", resourceInputObj)
+
+ //Deal with recipeParams
+ String recipeParamsFromWf = execution.getVariable("recipeParamXsd")
+ String resourceName = resourceInputObj.getResourceInstanceName()
+            //For sdnc, the default requestAction is "createNetworkInstance"
+ String operationType = "Network"
+ String apiType = "network"
+ if(!StringUtils.isBlank(recipeParamsFromRequest)){
+                //the operationType from the request recipe params is second priority.
+ operationType = jsonUtil.getJsonValue(recipeParamsFromRequest, "operationType")
+ apiType = jsonUtil.getJsonValue(recipeParamsFromRequest, "apiType")
+ }
+ if(!StringUtils.isBlank(recipeParamsFromWf)){
+                //the operationType from the workflow (first node) recipe params is highest priority.
+                operationType = jsonUtil.getJsonValue(recipeParamsFromWf, "operationType")
+                apiType = jsonUtil.getJsonValue(recipeParamsFromWf, "apiType")
+ }
+
+ execution.setVariable(Prefix + "operationType", operationType)
+ execution.setVariable(Prefix + "apiType", apiType)
+ execution.setVariable(Prefix + "serviceInstanceId", resourceInputObj.getServiceInstanceId())
+ execution.setVariable("mso-request-id", requestId)
+ execution.setVariable("mso-service-instance-id", resourceInputObj.getServiceInstanceId())
+ //TODO Here build networkrequest
+
+ } catch (BpmnError e) {
+ throw e;
+ } catch (Exception ex){
+ String msg = "Exception in preProcessRequest " + ex.getMessage()
+ utils.log("DEBUG", msg, isDebugEnabled)
+ exceptionUtil.buildAndThrowWorkflowException(execution, 7000, msg)
+ }
+ }
+
+ String customizeResourceParam(String netowrkInputParametersJson) {
+ List<Map<String, Object>> paramList = new ArrayList();
+ JSONObject jsonObject = new JSONObject(netowrkInputParametersJson);
+ Iterator iterator = jsonObject.keys();
+ while (iterator.hasNext()) {
+ String key = iterator.next();
+ HashMap<String, String> hashMap = new HashMap();
+ hashMap.put("name", key);
+ hashMap.put("value", jsonObject.get(key))
+ paramList.add(hashMap)
+ }
+ Map<String, List<Map<String, Object>>> paramMap = new HashMap();
+ paramMap.put("param", paramList);
+
+ return new JSONObject(paramMap).toString();
+ }
+
+ public void prepareSDNCRequest (DelegateExecution execution) {
+ String svcAction = "create"
+ prepareSDNCRequestReq(execution, svcAction, "")
+ }
+
+
+ public void prepareSDNCActivateRequest (DelegateExecution execution) {
+ String svcAction = "activate"
+ String sndcResourceId = execution.getVariable(Prefix + "sdncResourceId")
+ prepareSDNCRequestReq(execution, svcAction, sndcResourceId)
+ }
+ /**
+ * Pre Process the BPMN Flow Request
+     * Includes:
+     * building the SDNC adapter workflow request payload
+     * converting the resource request inputs into SDNC input parameters
+ */
+ public void prepareSDNCRequestReq (DelegateExecution execution, String svcAction, String sdncResourceId) {
+ def isDebugEnabled = execution.getVariable("isDebugLogEnabled")
+ utils.log("INFO"," ***** Started prepareSDNCRequest *****", isDebugEnabled)
+
+ try {
+ // get variables
+ String operationType = execution.getVariable(Prefix + "operationType")
+ String sdnc_apiType = execution.getVariable(Prefix + "apiType")
+ String sdnc_svcAction = svcAction
+ String sdnc_requestAction = StringUtils.capitalize(sdnc_svcAction) + operationType +"Instance"
+
+ String sdncCallback = execution.getVariable("URN_mso_workflow_sdncadapter_callback")
+ String createNetworkInput = execution.getVariable(Prefix + "networkRequest")
+
+ String hdrRequestId = execution.getVariable("mso-request-id")
+ String serviceInstanceId = execution.getVariable(Prefix + "serviceInstanceId")
+ String source = execution.getVariable("source")
+ String sdnc_service_id = execution.getVariable(Prefix + "sdncServiceId")
+ ResourceInput resourceInputObj = execution.getVariable(Prefix + "resourceInput")
+ String serviceType = resourceInputObj.getServiceType()
+ String serviceModelInvariantUuid = resourceInputObj.getServiceModelInfo().getModelInvariantUuid()
+ String serviceModelUuid = resourceInputObj.getServiceModelInfo().getModelUuid()
+ String serviceModelVersion = resourceInputObj.getServiceModelInfo().getModelVersion()
+ String serviceModelName = resourceInputObj.getServiceModelInfo().getModelName()
+ String globalCustomerId = resourceInputObj.getGlobalSubscriberId()
+ String modelInvariantUuid = resourceInputObj.getResourceModelInfo().getModelInvariantUuid();
+ String modelCustomizationUuid = resourceInputObj.getResourceModelInfo().getModelCustomizationUuid()
+ String modelUuid = resourceInputObj.getResourceModelInfo().getModelUuid()
+ String modelName = resourceInputObj.getResourceModelInfo().getModelName()
+ String modelVersion = resourceInputObj.getResourceModelInfo().getModelVersion()
+ String resourceInputPrameters = resourceInputObj.getResourceParameters()
+ String netowrkInputParametersJson = jsonUtil.getJsonValue(resourceInputPrameters, "requestInputs")
+ //here convert json string to xml string
+ String netowrkInputParameters = XML.toString(new JSONObject(customizeResourceParam(netowrkInputParametersJson)))
+
+ // 1. prepare assign topology via SDNC Adapter SUBFLOW call
+ String sndcTopologyCreateRequest =
+ """<aetgt:SDNCAdapterWorkflowRequest xmlns:aetgt="http://org.openecomp/mso/workflow/schema/v1"
+ xmlns:sdncadapter="http://org.openecomp.mso/workflow/sdnc/adapter/schema/v1"
+ xmlns:sdncadapterworkflow="http://org.openecomp/mso/workflow/schema/v1">
+ <sdncadapter:RequestHeader>
+ <sdncadapter:RequestId>${hdrRequestId}</sdncadapter:RequestId>
+ <sdncadapter:SvcInstanceId>${serviceInstanceId}</sdncadapter:SvcInstanceId>
+ <sdncadapter:SvcAction>${sdnc_svcAction}</sdncadapter:SvcAction>
+ <sdncadapter:SvcOperation>${sdnc_apiType}-topology-operation</sdncadapter:SvcOperation>
+                   <sdncadapter:CallbackUrl>${sdncCallback}</sdncadapter:CallbackUrl>
+ <sdncadapter:MsoAction>generic-resource</sdncadapter:MsoAction>
+ </sdncadapter:RequestHeader>
+ <sdncadapterworkflow:SDNCRequestData>
+ <request-information>
+ <request-id>${hdrRequestId}</request-id>
+ <request-action>${sdnc_requestAction}</request-action>
+ <source>${source}</source>
+ <notification-url></notification-url>
+ <order-number></order-number>
+ <order-version></order-version>
+ </request-information>
+ <service-information>
+ <service-id>${serviceInstanceId}</service-id>
+ <subscription-service-type>${serviceType}</subscription-service-type>
+ <onap-model-information>
+ <model-invariant-uuid>${serviceModelInvariantUuid}</model-invariant-uuid>
+ <model-uuid>${serviceModelUuid}</model-uuid>
+ <model-version>${serviceModelVersion}</model-version>
+ <model-name>${serviceModelName}</model-name>
+ </onap-model-information>
+ <service-instance-id>${serviceInstanceId}</service-instance-id>
+ <global-customer-id>${globalCustomerId}</global-customer-id>
+ </service-information>
+ <${sdnc_apiType}-information>
+ <${sdnc_apiType}-id>${sdncResourceId}</${sdnc_apiType}-id>
+ <onap-model-information>
+ <model-invariant-uuid>${modelInvariantUuid}</model-invariant-uuid>
+ <model-customization-uuid>${modelCustomizationUuid}</model-customization-uuid>
+ <model-uuid>${modelUuid}</model-uuid>
+ <model-version>${modelVersion}</model-version>
+ <model-name>${modelName}</model-name>
+ </onap-model-information>
+ </${sdnc_apiType}-information>
+ <${sdnc_apiType}-request-input>
+ <${sdnc_apiType}-input-parameters>${netowrkInputParameters}</${sdnc_apiType}-input-parameters>
+ </${sdnc_apiType}-request-input>
+ </sdncadapterworkflow:SDNCRequestData>
+ </aetgt:SDNCAdapterWorkflowRequest>""".trim()
+
+ String sndcTopologyCreateRequesAsString = utils.formatXml(sndcTopologyCreateRequest)
+ utils.logAudit(sndcTopologyCreateRequesAsString)
+ execution.setVariable("sdncAdapterWorkflowRequest", sndcTopologyCreateRequesAsString)
+ utils.log("INFO","sdncAdapterWorkflowRequest :" + sndcTopologyCreateRequesAsString, isDebugEnabled)
+ utils.log("DEBUG","sdncAdapterWorkflowRequest - " + "\n" + sndcTopologyCreateRequesAsString, isDebugEnabled)
+
+ } catch (Exception ex) {
+            String exceptionMessage = " Bpmn error encountered in CreateActivateSDNCResource flow. prepareSDNCRequest() - " + ex.getMessage()
+ utils.log("DEBUG", exceptionMessage, isDebugEnabled)
+ exceptionUtil.buildAndThrowWorkflowException(execution, 7000, exceptionMessage)
+
+ }
+ utils.log("INFO"," ***** Exit prepareSDNCRequest *****", isDebugEnabled)
+ }
+
+ private void setProgressUpdateVariables(DelegateExecution execution, String body) {
+ def dbAdapterEndpoint = execution.getVariable("URN_mso_adapters_openecomp_db_endpoint")
+ execution.setVariable("CVFMI_dbAdapterEndpoint", dbAdapterEndpoint)
+ execution.setVariable("CVFMI_updateResOperStatusRequest", body)
+ }
+
+ public void prepareUpdateBeforeCreateSDNCResource(DelegateExecution execution) {
+ def isDebugEnabled = execution.getVariable("isDebugLogEnabled")
+ utils.log("INFO"," ***** Started prepareUpdateBeforeCreateSDNCResource *****", isDebugEnabled)
+
+ ResourceInput resourceInputObj = execution.getVariable(Prefix + "resourceInput")
+ String operType = resourceInputObj.getOperationType()
+ String resourceCustomizationUuid = resourceInputObj.getResourceModelInfo().getModelCustomizationUuid()
+ String ServiceInstanceId = resourceInputObj.getServiceInstanceId()
+ String operationId = resourceInputObj.getOperationId()
+ String modelName = resourceInputObj.getResourceModelInfo().getModelName()
+ String progress = "20"
+ String status = "processing"
+ String statusDescription = "Create " + modelName
+
+ execution.getVariable("operationId")
+
+ String body = """
+ <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
+ xmlns:ns="http://org.openecomp.mso/requestsdb">
+ <soapenv:Header/>
+ <soapenv:Body>
+ <ns:updateResourceOperationStatus>
+ <operType>${operType}</operType>
+ <operationId>${operationId}</operationId>
+ <progress>${progress}</progress>
+ <resourceTemplateUUID>${resourceCustomizationUuid}</resourceTemplateUUID>
+ <serviceId>${ServiceInstanceId}</serviceId>
+ <status>${status}</status>
+ <statusDescription>${statusDescription}</statusDescription>
+ </ns:updateResourceOperationStatus>
+ </soapenv:Body>
+ </soapenv:Envelope>""";
+
+ setProgressUpdateVariables(execution, body)
+ utils.log("INFO"," ***** End prepareUpdateBeforeCreateSDNCResource *****", isDebugEnabled)
+ }
+
+ public void postCreateSDNC(DelegateExecution execution) {
+ def isDebugEnabled = execution.getVariable("isDebugLogEnabled")
+ ServicePluginFactory.getInstance().test()
+ utils.log("INFO"," ***** Started postCreateSDNC *****", isDebugEnabled)
+ String sdnc_apiType = execution.getVariable(Prefix + "apiType")
+ String sdncAdapterResponse = execution.getVariable("sdncAdapterResponse")
+ utils.log("INFO","sdncAdapterResponse for create:" + sdncAdapterResponse , isDebugEnabled)
+ sdncAdapterResponse = sdncAdapterResponse.replace("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n", "")
+ sdncAdapterResponse = sdncAdapterResponse.replaceAll('tag0:', '').replaceAll(':tag0', '')
+ utils.log("INFO","sdncAdapterResponse for create after replace:" + sdncAdapterResponse , isDebugEnabled)
+        //if it is a vnf we need to query the vnf-id; if it is a network, we need to query the network-id
+ String sdncRespData = utils.getNodeText1(sdncAdapterResponse, "RequestData")
+ utils.log("INFO","sdncRespData:" + sdncRespData , isDebugEnabled)
+ String objectKey = "/" + sdnc_apiType + "/"
+ String objectDataKey = "/" + sdnc_apiType + "-data/"
+ String objectPath = utils.getNodeText1(sdncRespData, "object-path")
+
+ String resourceObjId = objectPath.substring(objectPath.indexOf(objectKey) + objectKey.length(), objectPath.indexOf(objectDataKey))
+ utils.log("INFO", "resourceObjId:" + resourceObjId, isDebugEnabled)
+ execution.setVariable(Prefix + "sdncResourceId", resourceObjId)
+
+ utils.log("INFO"," ***** End postCreateSDNC *****", isDebugEnabled)
+
+ }
+
+ public void postActivateSDNC(DelegateExecution execution) {
+ def isDebugEnabled = execution.getVariable("isDebugLogEnabled")
+ utils.log("INFO"," ***** Started postActivateSDNC *****", isDebugEnabled)
+ String sdncAdapterResponse = execution.getVariable("sdncAdapterResponse")
+ utils.log("INFO","sdncAdapterResponse for activate:" + sdncAdapterResponse , isDebugEnabled)
+ utils.log("INFO"," ***** End postActivateSDNC *****", isDebugEnabled)
+ }
+
+ public void prepareUpdateAfterCreateSDNCResource(DelegateExecution execution) {
+ def isDebugEnabled = execution.getVariable("isDebugLogEnabled")
+ utils.log("INFO"," ***** Started prepareUpdateAfterCreateSDNCResource *****", isDebugEnabled)
+ ResourceInput resourceInputObj = execution.getVariable(Prefix + "resourceInput")
+ String operType = resourceInputObj.getOperationType()
+ String resourceCustomizationUuid = resourceInputObj.getResourceModelInfo().getModelCustomizationUuid()
+ String ServiceInstanceId = resourceInputObj.getServiceInstanceId()
+ String modelName = resourceInputObj.getResourceModelInfo().getModelName()
+ String operationId = resourceInputObj.getOperationId()
+ String progress = "50"
+ String status = "processing"
+ String statusDescription = "Instantiate " + modelName
+
+ execution.getVariable("operationId")
+
+ String body = """
+ <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
+ xmlns:ns="http://org.openecomp.mso/requestsdb">
+ <soapenv:Header/>
+ <soapenv:Body>
+ <ns:updateResourceOperationStatus>
+ <operType>${operType}</operType>
+ <operationId>${operationId}</operationId>
+ <progress>${progress}</progress>
+ <resourceTemplateUUID>${resourceCustomizationUuid}</resourceTemplateUUID>
+ <serviceId>${ServiceInstanceId}</serviceId>
+ <status>${status}</status>
+ <statusDescription>${statusDescription}</statusDescription>
+ </ns:updateResourceOperationStatus>
+ </soapenv:Body>
+ </soapenv:Envelope>""";
+
+ setProgressUpdateVariables(execution, body)
+ utils.log("INFO"," ***** End prepareUpdateAfterCreateSDNCResource *****", isDebugEnabled)
+ }
+
+ public void prepareUpdateAfterActivateSDNCResource(DelegateExecution execution) {
+ def isDebugEnabled = execution.getVariable("isDebugLogEnabled")
+ utils.log("INFO"," ***** Started prepareUpdateAfterActivateSDNCResource *****", isDebugEnabled)
+ ResourceInput resourceInputObj = execution.getVariable(Prefix + "resourceInput")
+ String operType = resourceInputObj.getOperationType()
+ String resourceCustomizationUuid = resourceInputObj.getResourceModelInfo().getModelCustomizationUuid()
+ String ServiceInstanceId = resourceInputObj.getServiceInstanceId()
+ String modelName = resourceInputObj.getResourceModelInfo().getModelName()
+ String operationId = resourceInputObj.getOperationId()
+ String progress = "100"
+ String status = "finished"
+ String statusDescription = "Instantiate " + modelName + " finished"
+
+ execution.getVariable("operationId")
+
+ String body = """
+ <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
+ xmlns:ns="http://org.openecomp.mso/requestsdb">
+ <soapenv:Header/>
+ <soapenv:Body>
+ <ns:updateResourceOperationStatus>
+ <operType>${operType}</operType>
+ <operationId>${operationId}</operationId>
+ <progress>${progress}</progress>
+ <resourceTemplateUUID>${resourceCustomizationUuid}</resourceTemplateUUID>
+ <serviceId>${ServiceInstanceId}</serviceId>
+ <status>${status}</status>
+ <statusDescription>${statusDescription}</statusDescription>
+ </ns:updateResourceOperationStatus>
+ </soapenv:Body>
+ </soapenv:Envelope>""";
+
+ setProgressUpdateVariables(execution, body)
+ utils.log("INFO"," ***** End prepareUpdateAfterActivateSDNCResource *****", isDebugEnabled)
+ }
+
+ public void postCreateSDNCCall(DelegateExecution execution){
+ def isDebugEnabled = execution.getVariable("isDebugLogEnabled")
+        utils.log("INFO"," ***** Started postCreateSDNCCall *****", isDebugEnabled)
+ String responseCode = execution.getVariable(Prefix + "sdncCreateReturnCode")
+ String responseObj = execution.getVariable(Prefix + "SuccessIndicator")
+
+ utils.log("INFO","response from sdnc, response code :" + responseCode + " response object :" + responseObj, isDebugEnabled)
+        utils.log("INFO"," ***** Exit postCreateSDNCCall *****", isDebugEnabled)
+ }
+
+ public void sendSyncResponse (DelegateExecution execution) {
+ def isDebugEnabled=execution.getVariable("isDebugLogEnabled")
+ utils.log("DEBUG", " *** sendSyncResponse *** ", isDebugEnabled)
+
+ try {
+ String operationStatus = "finished"
+ // RESTResponse for main flow
+ String resourceOperationResp = """{"operationStatus":"${operationStatus}"}""".trim()
+ utils.log("DEBUG", " sendSyncResponse to APIH:" + "\n" + resourceOperationResp, isDebugEnabled)
+ sendWorkflowResponse(execution, 202, resourceOperationResp)
+ execution.setVariable("sentSyncResponse", true)
+
+ } catch (Exception ex) {
+            String msg = "Exception in sendSyncResponse:" + ex.getMessage()
+ utils.log("DEBUG", msg, isDebugEnabled)
+ exceptionUtil.buildAndThrowWorkflowException(execution, 7000, msg)
+ }
+        utils.log("DEBUG"," ***** Exit sendSyncResponse *****", isDebugEnabled)
+ }
+}
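
The customizeResourceParam helper in the flow above reshapes the flat requestInputs JSON object into the name/value list that the SDNC request body expects, before it is converted to XML. A small Python sketch, purely to illustrate the shape of the transformation (the flow itself does this in Groovy):

import json

def customize_resource_param(network_inputs_json):
    # {"cidr": "10.0.0.0/24"} -> {"param": [{"name": "cidr", "value": "10.0.0.0/24"}]}
    inputs = json.loads(network_inputs_json)
    return json.dumps(
        {'param': [{'name': key, 'value': value} for key, value in inputs.items()]})

print(customize_resource_param('{"cidr": "10.0.0.0/24"}'))
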
diff --git a/bpmn/MSOInfrastructureBPMN/src/main/groovy/org/openecomp/mso/bpmn/infrastructure/scripts/DoCreateE2EServiceInstance.groovy b/bpmn/MSOInfrastructureBPMN/src/main/groovy/org/openecomp/mso/bpmn/infrastructure/scripts/DoCreateE2EServiceInstance.groovy
index 73d51c99b0..eaec39b5fd 100644
--- a/bpmn/MSOInfrastructureBPMN/src/main/groovy/org/openecomp/mso/bpmn/infrastructure/scripts/DoCreateE2EServiceInstance.groovy
+++ b/bpmn/MSOInfrastructureBPMN/src/main/groovy/org/openecomp/mso/bpmn/infrastructure/scripts/DoCreateE2EServiceInstance.groovy
@@ -46,6 +46,7 @@ import org.openecomp.mso.bpmn.core.WorkflowException
import org.openecomp.mso.rest.APIResponse;
import org.openecomp.mso.rest.RESTClient
import org.openecomp.mso.rest.RESTConfig
+import org.openecomp.mso.bpmn.infrastructure.workflow.service.ServicePluginFactory
import java.util.List;
import java.util.UUID;
@@ -221,8 +222,20 @@ public class DoCreateE2EServiceInstance extends AbstractServiceTaskProcessor {
}
}
+ public void doServicePreOperation(DelegateExecution execution){
+ //we need a service plugin platform here.
+ ServiceDecomposition serviceDecomposition = execution.getVariable("serviceDecomposition")
+ String uuiRequest = execution.getVariable("uuiRequest")
+ String newUuiRequest = ServicePluginFactory.getInstance().preProcessService(serviceDecomposition, uuiRequest);
+ execution.setVariable("uuiRequest", newUuiRequest)
+ }
+
public void doServiceHoming(DelegateExecution execution) {
- //Now Homing is not clear. So to be implemented.
+ //we need a service plugin platform here.
+ ServiceDecomposition serviceDecomposition = execution.getVariable("serviceDecomposition")
+ String uuiRequest = execution.getVariable("uuiRequest")
+ String newUuiRequest = ServicePluginFactory.getInstance().doServiceHoming(serviceDecomposition, uuiRequest);
+ execution.setVariable("uuiRequest", newUuiRequest)
}
public void postProcessAAIGET(DelegateExecution execution) {
diff --git a/bpmn/MSOInfrastructureBPMN/src/main/java/org/openecomp/mso/bpmn/infrastructure/workflow/service/ServicePluginFactory.java b/bpmn/MSOInfrastructureBPMN/src/main/java/org/openecomp/mso/bpmn/infrastructure/workflow/service/ServicePluginFactory.java
new file mode 100644
index 0000000000..344d8cd4fa
--- /dev/null
+++ b/bpmn/MSOInfrastructureBPMN/src/main/java/org/openecomp/mso/bpmn/infrastructure/workflow/service/ServicePluginFactory.java
@@ -0,0 +1,443 @@
+/*-
+ * ============LICENSE_START=======================================================
+ * ONAP - SO
+ * ================================================================================
+ * Copyright (C) 2018 Huawei Intellectual Property. All rights reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.openecomp.mso.bpmn.infrastructure.workflow.service;
+
+import java.io.IOException;
+import java.net.SocketTimeoutException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.http.HttpResponse;
+import org.apache.http.ParseException;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.apache.http.conn.ConnectTimeoutException;
+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.util.EntityUtils;
+import org.camunda.bpm.engine.runtime.Execution;
+import org.openecomp.mso.bpmn.core.PropertyConfiguration;
+import org.openecomp.mso.bpmn.core.domain.ServiceDecomposition;
+import org.openecomp.mso.bpmn.core.json.JsonUtils;
+import org.openecomp.mso.logger.MessageEnum;
+import org.openecomp.mso.logger.MsoLogger;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+
+public class ServicePluginFactory {
+
+ // SOTN calculate route
+ public static final String OOF_Default_EndPoint = "http://192.168.1.223:8443/oof/sotncalc";
+
+ public static final String Third_SP_Default_EndPoint = "http://192.168.1.223:8443/sp/resourcemgr/querytps";
+
+ private static final int DEFAULT_TIME_OUT = 60000;
+
+ static JsonUtils jsonUtil = new JsonUtils();
+
+ private static MsoLogger LOGGER = MsoLogger.getMsoLogger(MsoLogger.Catalog.RA);
+
+ private static ServicePluginFactory instance;
+
+
+ public static synchronized ServicePluginFactory getInstance() {
+ if (null == instance) {
+ instance = new ServicePluginFactory();
+ }
+ return instance;
+ }
+
+ private ServicePluginFactory() {
+
+ }
+
+ public String test()
+ {
+ return "";
+ }
+
+ private String getThirdSPEndPoint(){
+ Map<String, String> properties = PropertyConfiguration.getInstance().getProperties("topology.properties");
+ if (properties != null) {
+ String thirdSPEndPoint = properties.get("third-sp-endpoint");
+ if(null != thirdSPEndPoint && !thirdSPEndPoint.isEmpty()){
+ return thirdSPEndPoint;
+ }
+ }
+ return Third_SP_Default_EndPoint;
+ }
+
+ private String getOOFCalcEndPoint(){
+ Map<String, String> properties = PropertyConfiguration.getInstance().getProperties("topology.properties");
+ if (properties != null) {
+ String oofCalcEndPoint = properties.get("oof-calc-endpoint");
+ if(null != oofCalcEndPoint && !oofCalcEndPoint.isEmpty()){
+ return oofCalcEndPoint;
+ }
+ }
+ return OOF_Default_EndPoint;
+ }
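+
+ // Note: the endpoint constants above are demo defaults. A deployment is expected to override
+ // them through topology.properties; the keys read by the getters above are
+ // "third-sp-endpoint" and "oof-calc-endpoint", e.g. (illustrative values only):
+ //   third-sp-endpoint=http://<sp-resourcemgr-host>:<port>/sp/resourcemgr/querytps
+ //   oof-calc-endpoint=http://<oof-host>:<port>/oof/sotncalc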
+
+
+ public String preProcessService(ServiceDecomposition serviceDecomposition, String uuiRequest) {
+
+ // Currently only SOTN services are handled.
+ if (isSOTN(serviceDecomposition, uuiRequest)) {
+ // We need to query the VPN terminal points by site location info.
+ return preProcessSOTNService(serviceDecomposition, uuiRequest);
+ }
+ return uuiRequest;
+ }
+
+ public String doServiceHoming(ServiceDecomposition serviceDecomposition, String uuiRequest) {
+ // Currently only SOTN services are handled.
+ if (isSOTN(serviceDecomposition, uuiRequest)) {
+ return doSOTNServiceHoming(serviceDecomposition, uuiRequest);
+ }
+ return uuiRequest;
+ }
+
+ private boolean isSOTN(ServiceDecomposition serviceDecomposition, String uuiRequest) {
+ // There should be a plugin registration platform; for now we use a simple keyword check.
+ return uuiRequest.contains("clientSignal") && uuiRequest.contains("vpnType");
+ }
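+
+ // Illustrative only: a (hypothetical) uuiRequest fragment such as
+ //   {"service":{"parameters":{"requestInputs":{"srcClientSignal":"...","dstClientSignal":"...","vpnType":"..."}}}}
+ // would be classified as SOTN by the simple keyword check above, because the raw string
+ // contains both "clientSignal" and "vpnType".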
+
+ private String preProcessSOTNService(ServiceDecomposition serviceDecomposition, String uuiRequest) {
+ Map<String, Object> uuiObject = getJsonObject(uuiRequest, Map.class);
+ Map<String, Object> serviceObject = (Map<String, Object>) uuiObject.get("service");
+ Map<String, Object> serviceParametersObject = (Map<String, Object>) serviceObject.get("parameters");
+ Map<String, Object> serviceRequestInputs = (Map<String, Object>) serviceParametersObject.get("requestInputs");
+ List<Object> resources = (List<Object>) serviceParametersObject.get("resources");
+ // This logic is for the demo only and is not finalized for the community.
+ String srcLocation = "";
+ String dstLocation = "";
+ String srcClientSignal = "";
+ String dstClientSignal = "";
+ // Support both the R2 and R1 uuiRequest formats.
+ // R2 uuiRequest: the parameters are at the service level.
+ for (Entry<String, Object> entry : serviceRequestInputs.entrySet()) {
+ if (entry.getKey().toLowerCase().contains("location")) {
+ if ("".equals(srcLocation)) {
+ srcLocation = (String) entry.getValue();
+ } else if ("".equals(dstLocation)) {
+ dstLocation = (String) entry.getValue();
+ }
+ }
+ if (entry.getKey().toLowerCase().contains("clientsignal")) {
+ if ("".equals(srcClientSignal)) {
+ srcClientSignal = (String) entry.getValue();
+ } else if ("".equals(dstClientSignal)) {
+ dstClientSignal = (String) entry.getValue();
+ }
+ }
+ }
+
+ // R1 uuiRequest: the parameters are at the resource level.
+ for (Object resource : resources) {
+ Map<String, Object> resourceObject = (Map<String, Object>) resource;
+ Map<String, Object> resourceParametersObject = (Map<String, Object>) resourceObject.get("parameters");
+ Map<String, Object> resourceRequestInputs = (Map<String, Object>) resourceParametersObject
+ .get("requestInputs");
+ for (Entry<String, Object> entry : resourceRequestInputs.entrySet()) {
+ if (entry.getKey().toLowerCase().contains("location")) {
+ if ("".equals(srcLocation)) {
+ srcLocation = (String) entry.getValue();
+ } else if ("".equals(dstLocation)) {
+ dstLocation = (String) entry.getValue();
+ }
+ }
+ if (entry.getKey().toLowerCase().contains("clientsignal")) {
+ if ("".equals(srcClientSignal)) {
+ srcClientSignal = (String) entry.getValue();
+ } else if ("".equals(dstClientSignal)) {
+ dstClientSignal = (String) entry.getValue();
+ }
+ }
+ }
+ }
+
+ Map<String, Object> vpnRequestInputs = getVPNResourceRequestInputs(resources);
+ // Put the client signals into the VPN resource request inputs.
+ vpnRequestInputs.put("src-client-signal", srcClientSignal);
+ vpnRequestInputs.put("dst-client-signal", dstClientSignal);
+
+ // Now we need to query terminal points from SP resourcemgr system.
+ List<Object> locationTerminalPointList = queryTerminalPointsFromServiceProviderSystem(srcLocation, dstLocation);
+ Map<String, Object> tpInfoMap = (Map<String, Object>) locationTerminalPointList.get(0);
+
+ serviceRequestInputs.put("inner-src-access-provider-id", tpInfoMap.get("access-provider-id"));
+ serviceRequestInputs.put("inner-src-access-client-id", tpInfoMap.get("access-client-id"));
+ serviceRequestInputs.put("inner-src-access-topology-id", tpInfoMap.get("access-topology-id"));
+ serviceRequestInputs.put("inner-src-access-node-id", tpInfoMap.get("access-node-id"));
+ serviceRequestInputs.put("inner-src-access-ltp-id", tpInfoMap.get("access-ltp-id"));
+ tpInfoMap = (Map<String, Object>) locationTerminalPointList.get(1);
+
+ serviceRequestInputs.put("inner-dst-access-provider-id", tpInfoMap.get("access-provider-id"));
+ serviceRequestInputs.put("inner-dst-access-client-id", tpInfoMap.get("access-client-id"));
+ serviceRequestInputs.put("inner-dst-access-topology-id", tpInfoMap.get("access-topology-id"));
+ serviceRequestInputs.put("inner-dst-access-node-id", tpInfoMap.get("access-node-id"));
+ serviceRequestInputs.put("inner-dst-access-ltp-id", tpInfoMap.get("access-ltp-id"));
+
+ String newRequest = getJsonString(uuiObject);
+ return newRequest;
+ }
+
+ private List<Object> queryTerminalPointsFromServiceProviderSystem(String srcLocation, String dstLocation) {
+ Map<String, String> locationSrc = new HashMap<String, String>();
+ locationSrc.put("location", srcLocation);
+ Map<String, String> locationDst = new HashMap<String, String>();
+ locationDst.put("location", dstLocation);
+ List<Map<String, String>> locations = new ArrayList<Map<String, String>>();
+ locations.add(locationSrc);
+ locations.add(locationDst);
+ List<Object> returnList = new ArrayList<Object>();
+ String reqContent = getJsonString(locations);
+ String url = getThirdSPEndPoint();
+ String responseContent = sendRequest(url, "POST", reqContent);
+ if (null != responseContent) {
+ returnList = getJsonObject(responseContent, List.class);
+ }
+ return returnList;
+ }
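+
+ // Assumed request/response shapes for the SP resourcemgr call above (illustrative only; the
+ // exact payload is defined by the SP resourcemgr API):
+ //   request : [{"location":"<srcLocation>"},{"location":"<dstLocation>"}]
+ //   response: a JSON list with one entry per location, each containing the keys consumed by
+ //             preProcessSOTNService(): access-provider-id, access-client-id, access-topology-id,
+ //             access-node-id and access-ltp-id.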
+
+ private Map<String, Object> getVPNResourceRequestInputs(List<Object> resources) {
+ for (Object resource : resources) {
+ Map<String, Object> resourceObject = (Map<String, Object>) resource;
+ Map<String, Object> resourceParametersObject = (Map<String, Object>) resourceObject.get("parameters");
+ Map<String, Object> resourceRequestInputs = (Map<String, Object>) resourceParametersObject
+ .get("requestInputs");
+ for (Entry<String, Object> entry : resourceRequestInputs.entrySet()) {
+ if (entry.getKey().toLowerCase().contains("vpntype")) {
+ return resourceRequestInputs;
+ }
+ }
+ }
+ return null;
+ }
+
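+ // Ad-hoc main() that extracts the network id segment from a hard-coded GENERIC-RESOURCE-API
+ // path and prints it; it is not invoked by the workflow code in this class.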
+ public static void main(String args[]){
+ String str = "restconf/config/GENERIC-RESOURCE-API:services/service/eca7e542-12ba-48de-8544-fac59303b14e/service-data/networks/network/aec07806-1671-4af2-b722-53c8e320a633/network-data/";
+
+ int index1 = str.indexOf("/network/");
+ int index2 = str.indexOf("/network-data");
+
+ String str1 = str.substring(index1 + "/network/".length(), index2);
+ System.out.println(str1);
+
+ }
+
+ private String doSOTNServiceHoming(ServiceDecomposition serviceDecomposition, String uuiRequest) {
+ // query the route for the service.
+ Map<String, Object> uuiObject = getJsonObject(uuiRequest, Map.class);
+ Map<String, Object> serviceObject = (Map<String, Object>) uuiObject.get("service");
+ Map<String, Object> serviceParametersObject = (Map<String, Object>) serviceObject.get("parameters");
+ Map<String, Object> serviceRequestInputs = (Map<String, Object>) serviceParametersObject.get("requestInputs");
+ Map<String, Object> oofQueryObject = new HashMap<String, Object>();
+ List<Object> resources = (List<Object>) serviceParametersObject.get("resources");
+ oofQueryObject.put("src-access-provider-id", serviceRequestInputs.get("inner-src-access-provider-id"));
+ oofQueryObject.put("src-access-client-id", serviceRequestInputs.get("inner-src-access-client-id"));
+ oofQueryObject.put("src-access-topology-id", serviceRequestInputs.get("inner-src-access-topology-id"));
+ oofQueryObject.put("src-access-node-id", serviceRequestInputs.get("inner-src-access-node-id"));
+ oofQueryObject.put("src-access-ltp-id", serviceRequestInputs.get("inner-src-access-ltp-id"));
+ oofQueryObject.put("dst-access-provider-id", serviceRequestInputs.get("inner-dst-access-provider-id"));
+ oofQueryObject.put("dst-access-client-id", serviceRequestInputs.get("inner-dst-access-client-id"));
+ oofQueryObject.put("dst-access-topology-id", serviceRequestInputs.get("inner-dst-access-topology-id"));
+ oofQueryObject.put("dst-access-node-id", serviceRequestInputs.get("inner-dst-access-node-id"));
+ oofQueryObject.put("dst-access-ltp-id", serviceRequestInputs.get("inner-dst-access-ltp-id"));
+ String oofRequestReq = getJsonString(oofQueryObject);
+ String url = getOOFCalcEndPoint();
+ String responseContent = sendRequest(url, "POST", oofRequestReq);
+
+ List<Object> returnList = new ArrayList<Object>();
+ if (null != responseContent) {
+ returnList = getJsonObject(responseContent, List.class);
+ }
+ // In the demo there is only one VPN and no cross-VPN routing; merge the returned route into it.
+ Map<String, Object> returnRoute = getReturnRoute(returnList);
+ Map<String, Object> vpnRequestInputs = getVPNResourceRequestInputs(resources);
+ vpnRequestInputs.putAll(returnRoute);
+ String newRequest = getJsonString(uuiObject);
+ return newRequest;
+ }
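+
+ // Assumed OOF sotncalc exchange (illustrative only): the request carries the src-/dst-access-*
+ // fields collected above, and the response is expected to be a JSON list of per-VPN route maps,
+ // each holding at least "access-topology-id" plus the route attributes that getReturnRoute()
+ // merges into the VPN resource request inputs.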
+
+ private Map<String, Object> getReturnRoute(List<Object> returnList){
+ Map<String, Object> returnRoute = new HashMap<String,Object>();
+ for(Object returnVpn :returnList){
+ Map<String, Object> returnVpnInfo = (Map<String, Object>) returnVpn;
+ String accessTopoId = (String)returnVpnInfo.get("access-topology-id");
+ if("100".equals(accessTopoId)){
+ returnRoute.putAll(returnVpnInfo);
+ }
+ else if("101".equals(accessTopoId)){
+ for(String key : returnVpnInfo.keySet()){
+ returnRoute.put("domain1-" + key, returnVpnInfo.get(key));
+ }
+ }
+ else if("102".equals(accessTopoId)){
+ for(String key : returnVpnInfo.keySet()){
+ returnRoute.put("domain2-" + key, returnVpnInfo.get(key));
+ }
+ }
+ else{
+ for(String key : returnVpnInfo.keySet()){
+ returnRoute.put("domain" + accessTopoId +"-" + key, returnVpnInfo.get(key));
+ }
+ }
+ }
+ return returnRoute;
+ }
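+
+ // Example of the prefixing above (derived from the branches): an entry with access-topology-id
+ // "100" is copied unprefixed, "101" is copied as "domain1-<key>", "102" as "domain2-<key>",
+ // and any other id "<n>" as "domain<n>-<key>".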
+
+ private Map<String, Object> getResourceParams(Execution execution, String resourceCustomizationUuid,
+ String serviceParameters) {
+ List<String> resourceList = jsonUtil.StringArrayToList(execution,
+ (String) JsonUtils.getJsonValue(serviceParameters, "resources"));
+ // Get the requestInputs string for the matching resource; the default is an empty string.
+ String resourceInputsFromUui = "";
+ for (String resource : resourceList) {
+ String resCusUuid = (String) JsonUtils.getJsonValue(resource, "resourceCustomizationUuid");
+ if (resourceCustomizationUuid.equals(resCusUuid)) {
+ String resourceParameters = JsonUtils.getJsonValue(resource, "parameters");
+ resourceInputsFromUui = JsonUtils.getJsonValue(resourceParameters, "requestInputs");
+ }
+ }
+ Map<String, Object> resourceInputsFromUuiMap = getJsonObject(resourceInputsFromUui, Map.class);
+ return resourceInputsFromUuiMap;
+ }
+
+ public static <T> T getJsonObject(String jsonstr, Class<T> type) {
+ ObjectMapper mapper = new ObjectMapper();
+ mapper.configure(SerializationFeature.WRAP_ROOT_VALUE, true);
+ try {
+ return mapper.readValue(jsonstr, type);
+ } catch (IOException e) {
+ LOGGER.error(MessageEnum.RA_NS_EXC, "", "", MsoLogger.ErrorCode.BusinessProcesssError,
+ "fail to unMarshal json", e);
+ }
+ return null;
+ }
+
+ public static String getJsonString(Object srcObj) {
+ ObjectMapper mapper = new ObjectMapper();
+ mapper.configure(SerializationFeature.WRAP_ROOT_VALUE, false);
+ String jsonStr = null;
+ try {
+ jsonStr = mapper.writeValueAsString(srcObj);
+ } catch (JsonProcessingException e) {
+ LOGGER.debug("SdcToscaParserException", e);
+ e.printStackTrace();
+ }
+ return jsonStr;
+ }
+
+ private static String sendRequest(String url, String methodType, String content) {
+
+ String msbUrl = url;
+ HttpRequestBase method = null;
+ HttpResponse httpResponse = null;
+
+ try {
+ int timeout = DEFAULT_TIME_OUT;
+
+ RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout).setConnectTimeout(timeout)
+ .setConnectionRequestTimeout(timeout).build();
+
+ HttpClient client = HttpClientBuilder.create().build();
+
+ if ("POST".equals(methodType.toUpperCase())) {
+ HttpPost httpPost = new HttpPost(msbUrl);
+ httpPost.setConfig(requestConfig);
+ httpPost.setEntity(new StringEntity(content, ContentType.APPLICATION_JSON));
+ method = httpPost;
+ } else if ("PUT".equals(methodType.toUpperCase())) {
+ HttpPut httpPut = new HttpPut(msbUrl);
+ httpPut.setConfig(requestConfig);
+ httpPut.setEntity(new StringEntity(content, ContentType.APPLICATION_JSON));
+ method = httpPut;
+ } else if ("GET".equals(methodType.toUpperCase())) {
+ HttpGet httpGet = new HttpGet(msbUrl);
+ httpGet.setConfig(requestConfig);
+ method = httpGet;
+ } else if ("DELETE".equals(methodType.toUpperCase())) {
+ HttpDelete httpDelete = new HttpDelete(msbUrl);
+ httpDelete.setConfig(requestConfig);
+ method = httpDelete;
+ }
+
+ // No authentication is applied for now.
+ // String userCredentials =
+ // SDNCAdapterProperties.getEncryptedProperty(Constants.SDNC_AUTH_PROP,
+ // Constants.DEFAULT_SDNC_AUTH, Constants.ENCRYPTION_KEY);
+ // String authorization = "Basic " +
+ // DatatypeConverter.printBase64Binary(userCredentials.getBytes());
+ // method.setHeader("Authorization", authorization);
+
+ httpResponse = client.execute(method);
+ String responseContent = null;
+ if (null != httpResponse && httpResponse.getEntity() != null) {
+ try {
+ responseContent = EntityUtils.toString(httpResponse.getEntity(), "UTF-8");
+ } catch (ParseException e) {
+ e.printStackTrace();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ if (null != method) {
+ method.reset();
+ }
+ method = null;
+ return responseContent;
+
+ } catch (SocketTimeoutException | ConnectTimeoutException e) {
+ return null;
+
+ } catch (Exception e) {
+ return null;
+
+ } finally {
+ if (httpResponse != null) {
+ try {
+ EntityUtils.consume(httpResponse.getEntity());
+ } catch (Exception e) {
+ }
+ }
+ if (method != null) {
+ try {
+ method.reset();
+ } catch (Exception e) {
+
+ }
+ }
+ }
+ }
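+
+ // Typical usage (see DoCreateE2EServiceInstance.groovy in this change): a workflow task obtains
+ // the singleton via ServicePluginFactory.getInstance(), calls
+ // preProcessService(serviceDecomposition, uuiRequest) and later
+ // doServiceHoming(serviceDecomposition, uuiRequest), storing the returned string back into the
+ // "uuiRequest" execution variable.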
+}
diff --git a/bpmn/MSOInfrastructureBPMN/src/main/java/org/openecomp/mso/bpmn/infrastructure/workflow/serviceTask/client/builder/AbstractBuilder.java b/bpmn/MSOInfrastructureBPMN/src/main/java/org/openecomp/mso/bpmn/infrastructure/workflow/serviceTask/client/builder/AbstractBuilder.java
index 09561a620e..2f5bda6f46 100644
--- a/bpmn/MSOInfrastructureBPMN/src/main/java/org/openecomp/mso/bpmn/infrastructure/workflow/serviceTask/client/builder/AbstractBuilder.java
+++ b/bpmn/MSOInfrastructureBPMN/src/main/java/org/openecomp/mso/bpmn/infrastructure/workflow/serviceTask/client/builder/AbstractBuilder.java
@@ -231,7 +231,7 @@ public abstract class AbstractBuilder<IN, OUT> {
protected ServiceInformationEntity getServiceInformationEntity(DelegateExecution execution) {
ServiceInformationEntity serviceInformationEntity = new ServiceInformationEntity();
- serviceInformationEntity.setServiceId("VOLTE_SERVICE_ID");
+ serviceInformationEntity.setServiceId((String) execution.getVariable("serviceInstanceId"));
serviceInformationEntity.setSubscriptionServiceType((String) execution.getVariable("serviceType"));
serviceInformationEntity.setOnapModelInformation(getOnapServiceModelInformationEntity(execution));
serviceInformationEntity.setServiceInstanceId((String) execution.getVariable("serviceInstanceId"));
diff --git a/bpmn/MSOInfrastructureBPMN/src/main/resources/process/CreateActivateSDNCResource.bpmn b/bpmn/MSOInfrastructureBPMN/src/main/resources/process/CreateActivateSDNCResource.bpmn
new file mode 100644
index 0000000000..f3c629f9db
--- /dev/null
+++ b/bpmn/MSOInfrastructureBPMN/src/main/resources/process/CreateActivateSDNCResource.bpmn
@@ -0,0 +1,393 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" id="Definitions_1" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="1.10.0">
+ <bpmn:process id="CreateActivateSDNCResource" name="CreateActivateSDNCResource" isExecutable="true">
+ <bpmn:startEvent id="createSDNCRES_StartEvent" name="createSDNCRES_StartEvent">
+ <bpmn:outgoing>SequenceFlow_1qo2pln</bpmn:outgoing>
+ </bpmn:startEvent>
+ <bpmn:sequenceFlow id="SequenceFlow_1qo2pln" sourceRef="createSDNCRES_StartEvent" targetRef="Task_1dlrfiw" />
+ <bpmn:sequenceFlow id="SequenceFlow_0khtova" sourceRef="PreprocessIncomingRequest_task" targetRef="Task_0tezqd4" />
+ <bpmn:scriptTask id="PreprocessIncomingRequest_task" name="prepare SDNC Create Request" scriptFormat="groovy">
+ <bpmn:incoming>SequenceFlow_18l3crb</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_0khtova</bpmn:outgoing>
+ <bpmn:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
+def dcsi = new CreateActivateSDNCResource()
+dcsi.prepareSDNCRequest(execution)]]></bpmn:script>
+ </bpmn:scriptTask>
+ <bpmn:endEvent id="EndEvent_1x6k78c" name="create SDNC call end">
+ <bpmn:incoming>SequenceFlow_17md60u</bpmn:incoming>
+ </bpmn:endEvent>
+ <bpmn:callActivity id="CallActivity_1600xlj" name="Call SDNC RSRC Create&#10; Adapter V1&#10;" calledElement="sdncAdapter">
+ <bpmn:extensionElements>
+ <camunda:in source="CRESDNCRES_activateSDNCRequest" target="sdncAdapterWorkflowRequest" />
+ <camunda:in source="mso-request-id" target="mso-request-id" />
+ <camunda:in source="mso-service-instance-id" target="mso-service-instance-id" />
+ <camunda:out source="sdncAdapterResponse" target="sdncAdapterResponse" />
+ <camunda:out source="SDNCA_ResponseCode" target="SDNCA_ResponseCode" />
+ <camunda:out source="SDNCA_SuccessIndicator" target="SDNCA_SuccessIndicator" />
+ <camunda:out source="WorkflowException" target="WorkflowException" />
+ <camunda:in source="sdncAdapterWorkflowRequest" target="sdncAdapterWorkflowRequest" />
+ </bpmn:extensionElements>
+ <bpmn:incoming>SequenceFlow_15mvedq</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_1xk5xed</bpmn:outgoing>
+ </bpmn:callActivity>
+ <bpmn:sequenceFlow id="SequenceFlow_1xk5xed" sourceRef="CallActivity_1600xlj" targetRef="Task_0mszkkr" />
+ <bpmn:sequenceFlow id="SequenceFlow_0ow44q0" sourceRef="Task_023hred" targetRef="ScriptTask_1g5zyi6" />
+ <bpmn:scriptTask id="Task_023hred" name="post SDNC create call">
+ <bpmn:incoming>SequenceFlow_1afu5al</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_0ow44q0</bpmn:outgoing>
+ <bpmn:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
+def dcsi = new CreateActivateSDNCResource()
+dcsi.postCreateSDNCCall(execution)]]></bpmn:script>
+ </bpmn:scriptTask>
+ <bpmn:sequenceFlow id="SequenceFlow_0w2es8j" sourceRef="Task_1dlrfiw" targetRef="Task_13sx2bp" />
+ <bpmn:sequenceFlow id="SequenceFlow_18l3crb" sourceRef="Task_13sx2bp" targetRef="PreprocessIncomingRequest_task" />
+ <bpmn:scriptTask id="Task_1dlrfiw" name="Set the Recipe DesignTimeParam" scriptFormat="groovy">
+ <bpmn:incoming>SequenceFlow_1qo2pln</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_0w2es8j</bpmn:outgoing>
+ <bpmn:script><![CDATA[String recipeParamXsdDemo="""{"operationType":"VPN","apiType":"network"}"""
+String recipeParamXsd=""
+execution.setVariable("recipeParamXsd", recipeParamXsd)]]></bpmn:script>
+ </bpmn:scriptTask>
+ <bpmn:scriptTask id="Task_13sx2bp" name="Pre Process Request" scriptFormat="groovy">
+ <bpmn:incoming>SequenceFlow_0w2es8j</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_18l3crb</bpmn:outgoing>
+ <bpmn:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
+def dcsi = new CreateActivateSDNCResource()
+dcsi.preProcessRequest(execution)]]></bpmn:script>
+ </bpmn:scriptTask>
+ <bpmn:sequenceFlow id="SequenceFlow_1mz0vdx" sourceRef="Task_0tezqd4" targetRef="Task_18tomkl" />
+ <bpmn:sequenceFlow id="SequenceFlow_15mvedq" sourceRef="Task_18tomkl" targetRef="CallActivity_1600xlj" />
+ <bpmn:scriptTask id="Task_0tezqd4" name="Create progress update parameters before create" scriptFormat="groovy">
+ <bpmn:incoming>SequenceFlow_0khtova</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_1mz0vdx</bpmn:outgoing>
+ <bpmn:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
+def dcsi = new CreateActivateSDNCResource()
+dcsi.prepareUpdateBeforeCreateSDNCResource(execution)]]></bpmn:script>
+ </bpmn:scriptTask>
+ <bpmn:scriptTask id="Task_0uwlr22" name="Create progress update parameters After create" scriptFormat="groovy">
+ <bpmn:incoming>SequenceFlow_0ruppyi</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_1jr6zi0</bpmn:outgoing>
+ <bpmn:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
+def dcsi = new CreateActivateSDNCResource()
+dcsi.prepareUpdateAfterCreateSDNCResource(execution)]]></bpmn:script>
+ </bpmn:scriptTask>
+ <bpmn:serviceTask id="Task_18tomkl" name="update progress update">
+ <bpmn:extensionElements>
+ <camunda:connector>
+ <camunda:inputOutput>
+ <camunda:inputParameter name="url">${CVFMI_dbAdapterEndpoint}</camunda:inputParameter>
+ <camunda:inputParameter name="headers">
+ <camunda:map>
+ <camunda:entry key="content-type">application/soap+xml</camunda:entry>
+ <camunda:entry key="Authorization">Basic QlBFTENsaWVudDpwYXNzd29yZDEk</camunda:entry>
+ </camunda:map>
+ </camunda:inputParameter>
+ <camunda:inputParameter name="payload">${CVFMI_updateResOperStatusRequest}</camunda:inputParameter>
+ <camunda:inputParameter name="method">POST</camunda:inputParameter>
+ <camunda:outputParameter name="CVFMI_dbResponseCode">${statusCode}</camunda:outputParameter>
+ <camunda:outputParameter name="CVFMI_dbResponse">${response}</camunda:outputParameter>
+ </camunda:inputOutput>
+ <camunda:connectorId>http-connector</camunda:connectorId>
+ </camunda:connector>
+ </bpmn:extensionElements>
+ <bpmn:incoming>SequenceFlow_1mz0vdx</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_15mvedq</bpmn:outgoing>
+ </bpmn:serviceTask>
+ <bpmn:serviceTask id="ServiceTask_1cm8iwr" name="update progress update">
+ <bpmn:extensionElements>
+ <camunda:connector>
+ <camunda:inputOutput>
+ <camunda:inputParameter name="url">${CVFMI_dbAdapterEndpoint}</camunda:inputParameter>
+ <camunda:inputParameter name="headers">
+ <camunda:map>
+ <camunda:entry key="content-type">application/soap+xml</camunda:entry>
+ <camunda:entry key="Authorization">Basic QlBFTENsaWVudDpwYXNzd29yZDEk</camunda:entry>
+ </camunda:map>
+ </camunda:inputParameter>
+ <camunda:inputParameter name="payload">${CVFMI_updateResOperStatusRequest}</camunda:inputParameter>
+ <camunda:inputParameter name="method">POST</camunda:inputParameter>
+ <camunda:outputParameter name="CVFMI_dbResponseCode">${statusCode}</camunda:outputParameter>
+ <camunda:outputParameter name="CVFMI_dbResponse">${response}</camunda:outputParameter>
+ </camunda:inputOutput>
+ <camunda:connectorId>http-connector</camunda:connectorId>
+ </camunda:connector>
+ </bpmn:extensionElements>
+ <bpmn:incoming>SequenceFlow_1jr6zi0</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_10cy2nu</bpmn:outgoing>
+ </bpmn:serviceTask>
+ <bpmn:sequenceFlow id="SequenceFlow_1jr6zi0" sourceRef="Task_0uwlr22" targetRef="ServiceTask_1cm8iwr" />
+ <bpmn:scriptTask id="ScriptTask_1g5zyi6" name="Send Sync Ack Response" scriptFormat="groovy">
+ <bpmn:incoming>SequenceFlow_0ow44q0</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_17md60u</bpmn:outgoing>
+ <bpmn:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
+def csi = new CreateActivateSDNCResource()
+csi.sendSyncResponse(execution)]]></bpmn:script>
+ </bpmn:scriptTask>
+ <bpmn:sequenceFlow id="SequenceFlow_17md60u" sourceRef="ScriptTask_1g5zyi6" targetRef="EndEvent_1x6k78c" />
+ <bpmn:scriptTask id="ScriptTask_0a98d9a" name="prepare SDNC Activate Request" scriptFormat="groovy">
+ <bpmn:incoming>SequenceFlow_10cy2nu</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_0rp0tdn</bpmn:outgoing>
+ <bpmn:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
+def dcsi = new CreateActivateSDNCResource()
+dcsi.prepareSDNCActivateRequest(execution)]]></bpmn:script>
+ </bpmn:scriptTask>
+ <bpmn:sequenceFlow id="SequenceFlow_10cy2nu" sourceRef="ServiceTask_1cm8iwr" targetRef="ScriptTask_0a98d9a" />
+ <bpmn:scriptTask id="ScriptTask_1toiss1" name="Create progress update parameters After Activate" scriptFormat="groovy">
+ <bpmn:incoming>SequenceFlow_0s3vc50</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_05adaey</bpmn:outgoing>
+ <bpmn:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
+def dcsi = new CreateActivateSDNCResource()
+dcsi.prepareUpdateAfterActivateSDNCResource(execution)]]></bpmn:script>
+ </bpmn:scriptTask>
+ <bpmn:serviceTask id="ServiceTask_10e6vjg" name="update progress update">
+ <bpmn:extensionElements>
+ <camunda:connector>
+ <camunda:inputOutput>
+ <camunda:inputParameter name="url">${CVFMI_dbAdapterEndpoint}</camunda:inputParameter>
+ <camunda:inputParameter name="headers">
+ <camunda:map>
+ <camunda:entry key="content-type">application/soap+xml</camunda:entry>
+ <camunda:entry key="Authorization">Basic QlBFTENsaWVudDpwYXNzd29yZDEk</camunda:entry>
+ </camunda:map>
+ </camunda:inputParameter>
+ <camunda:inputParameter name="payload">${CVFMI_updateResOperStatusRequest}</camunda:inputParameter>
+ <camunda:inputParameter name="method">POST</camunda:inputParameter>
+ <camunda:outputParameter name="CVFMI_dbResponseCode">${statusCode}</camunda:outputParameter>
+ <camunda:outputParameter name="CVFMI_dbResponse">${response}</camunda:outputParameter>
+ </camunda:inputOutput>
+ <camunda:connectorId>http-connector</camunda:connectorId>
+ </camunda:connector>
+ </bpmn:extensionElements>
+ <bpmn:incoming>SequenceFlow_05adaey</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_1afu5al</bpmn:outgoing>
+ </bpmn:serviceTask>
+ <bpmn:sequenceFlow id="SequenceFlow_05adaey" sourceRef="ScriptTask_1toiss1" targetRef="ServiceTask_10e6vjg" />
+ <bpmn:sequenceFlow id="SequenceFlow_1afu5al" sourceRef="ServiceTask_10e6vjg" targetRef="Task_023hred" />
+ <bpmn:callActivity id="CallActivity_0pr0s2y" name="Call SDNC RSRCActivate&#10; Adapter V1&#10;" calledElement="sdncAdapter">
+ <bpmn:extensionElements>
+ <camunda:in source="CRESDNCRES_activateSDNCRequest" target="sdncAdapterWorkflowRequest" />
+ <camunda:in source="mso-request-id" target="mso-request-id" />
+ <camunda:in source="mso-service-instance-id" target="mso-service-instance-id" />
+ <camunda:out source="sdncAdapterResponse" target="sdncAdapterResponse" />
+ <camunda:out source="SDNCA_ResponseCode" target="SDNCA_ResponseCode" />
+ <camunda:out source="SDNCA_SuccessIndicator" target="SDNCA_SuccessIndicator" />
+ <camunda:out source="WorkflowException" target="WorkflowException" />
+ <camunda:in source="sdncAdapterWorkflowRequest" target="sdncAdapterWorkflowRequest" />
+ </bpmn:extensionElements>
+ <bpmn:incoming>SequenceFlow_0rp0tdn</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_1efgf9m</bpmn:outgoing>
+ </bpmn:callActivity>
+ <bpmn:sequenceFlow id="SequenceFlow_1efgf9m" sourceRef="CallActivity_0pr0s2y" targetRef="Task_0p200y6" />
+ <bpmn:sequenceFlow id="SequenceFlow_0ruppyi" sourceRef="Task_0mszkkr" targetRef="Task_0uwlr22" />
+ <bpmn:scriptTask id="Task_0mszkkr" name="post create SDNC call" scriptFormat="groovy">
+ <bpmn:incoming>SequenceFlow_1xk5xed</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_0ruppyi</bpmn:outgoing>
+ <bpmn:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
+def dcsi = new CreateActivateSDNCResource()
+dcsi.postCreateSDNC(execution)]]></bpmn:script>
+ </bpmn:scriptTask>
+ <bpmn:scriptTask id="Task_0p200y6" name="post activate SDNC call" scriptFormat="groovy">
+ <bpmn:incoming>SequenceFlow_1efgf9m</bpmn:incoming>
+ <bpmn:outgoing>SequenceFlow_0s3vc50</bpmn:outgoing>
+ <bpmn:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
+def dcsi = new CreateActivateSDNCResource()
+dcsi.postActivateSDNC(execution)]]></bpmn:script>
+ </bpmn:scriptTask>
+ <bpmn:sequenceFlow id="SequenceFlow_0s3vc50" sourceRef="Task_0p200y6" targetRef="ScriptTask_1toiss1" />
+ <bpmn:sequenceFlow id="SequenceFlow_0rp0tdn" sourceRef="ScriptTask_0a98d9a" targetRef="CallActivity_0pr0s2y" />
+ </bpmn:process>
+ <bpmndi:BPMNDiagram id="BPMNDiagram_1">
+ <bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="CreateActivateSDNCResource">
+ <bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="createSDNCRES_StartEvent">
+ <dc:Bounds x="-111" y="111" width="36" height="36" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="-134" y="147" width="85" height="28" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNEdge id="SequenceFlow_1qo2pln_di" bpmnElement="SequenceFlow_1qo2pln">
+ <di:waypoint xsi:type="dc:Point" x="-75" y="129" />
+ <di:waypoint xsi:type="dc:Point" x="-10" y="129" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="-87.5" y="108" width="90" height="12" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNEdge id="SequenceFlow_0khtova_di" bpmnElement="SequenceFlow_0khtova">
+ <di:waypoint xsi:type="dc:Point" x="413" y="129" />
+ <di:waypoint xsi:type="dc:Point" x="460" y="129" />
+ <di:waypoint xsi:type="dc:Point" x="500" y="129" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="391.5" y="108" width="90" height="12" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNShape id="ScriptTask_03j6ogo_di" bpmnElement="PreprocessIncomingRequest_task">
+ <dc:Bounds x="313" y="89" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNShape id="EndEvent_15pcuuc_di" bpmnElement="EndEvent_1x6k78c">
+ <dc:Bounds x="1049" y="544" width="36" height="36" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="1013" y="586" width="81" height="28" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNShape id="CallActivity_1600xlj_di" bpmnElement="CallActivity_1600xlj">
+ <dc:Bounds x="109" y="295" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNEdge id="SequenceFlow_1xk5xed_di" bpmnElement="SequenceFlow_1xk5xed">
+ <di:waypoint xsi:type="dc:Point" x="209" y="335" />
+ <di:waypoint xsi:type="dc:Point" x="257" y="335" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="188" y="314" width="90" height="12" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNEdge id="SequenceFlow_0ow44q0_di" bpmnElement="SequenceFlow_0ow44q0">
+ <di:waypoint xsi:type="dc:Point" x="896" y="562" />
+ <di:waypoint xsi:type="dc:Point" x="915" y="562" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="860.5" y="541" width="90" height="12" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNShape id="ScriptTask_0gyej62_di" bpmnElement="Task_023hred">
+ <dc:Bounds x="796" y="522" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNEdge id="SequenceFlow_0w2es8j_di" bpmnElement="SequenceFlow_0w2es8j">
+ <di:waypoint xsi:type="dc:Point" x="90" y="129" />
+ <di:waypoint xsi:type="dc:Point" x="148" y="129" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="74" y="108" width="90" height="12" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNEdge id="SequenceFlow_18l3crb_di" bpmnElement="SequenceFlow_18l3crb">
+ <di:waypoint xsi:type="dc:Point" x="248" y="129" />
+ <di:waypoint xsi:type="dc:Point" x="313" y="129" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="235.5" y="108" width="90" height="12" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNShape id="ScriptTask_0lc6l7a_di" bpmnElement="Task_1dlrfiw">
+ <dc:Bounds x="-10" y="89" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNShape id="ScriptTask_14l9mlv_di" bpmnElement="Task_13sx2bp">
+ <dc:Bounds x="148" y="89" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNEdge id="SequenceFlow_1mz0vdx_di" bpmnElement="SequenceFlow_1mz0vdx">
+ <di:waypoint xsi:type="dc:Point" x="606" y="129" />
+ <di:waypoint xsi:type="dc:Point" x="638" y="129" />
+ <di:waypoint xsi:type="dc:Point" x="638" y="129" />
+ <di:waypoint xsi:type="dc:Point" x="738" y="129" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="608" y="123" width="90" height="12" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNEdge id="SequenceFlow_15mvedq_di" bpmnElement="SequenceFlow_15mvedq">
+ <di:waypoint xsi:type="dc:Point" x="788" y="169" />
+ <di:waypoint xsi:type="dc:Point" x="788" y="218" />
+ <di:waypoint xsi:type="dc:Point" x="0" y="218" />
+ <di:waypoint xsi:type="dc:Point" x="0" y="335" />
+ <di:waypoint xsi:type="dc:Point" x="109" y="335" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="349" y="197" width="90" height="12" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNShape id="ScriptTask_1kqf4ge_di" bpmnElement="Task_0tezqd4">
+ <dc:Bounds x="506" y="89" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNShape id="ScriptTask_0hu4lhm_di" bpmnElement="Task_0uwlr22">
+ <dc:Bounds x="426" y="295" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNShape id="ServiceTask_1q6ssz7_di" bpmnElement="Task_18tomkl">
+ <dc:Bounds x="738" y="89" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNShape id="ServiceTask_1cm8iwr_di" bpmnElement="ServiceTask_1cm8iwr">
+ <dc:Bounds x="588" y="295" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNEdge id="SequenceFlow_1jr6zi0_di" bpmnElement="SequenceFlow_1jr6zi0">
+ <di:waypoint xsi:type="dc:Point" x="526" y="335" />
+ <di:waypoint xsi:type="dc:Point" x="554" y="335" />
+ <di:waypoint xsi:type="dc:Point" x="554" y="335" />
+ <di:waypoint xsi:type="dc:Point" x="588" y="335" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="524" y="329" width="90" height="12" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNShape id="ScriptTask_1g5zyi6_di" bpmnElement="ScriptTask_1g5zyi6">
+ <dc:Bounds x="915" y="522" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNEdge id="SequenceFlow_17md60u_di" bpmnElement="SequenceFlow_17md60u">
+ <di:waypoint xsi:type="dc:Point" x="1015" y="562" />
+ <di:waypoint xsi:type="dc:Point" x="1049" y="562" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="987" y="540" width="90" height="14" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNShape id="ScriptTask_0a98d9a_di" bpmnElement="ScriptTask_0a98d9a">
+ <dc:Bounds x="-2" y="522" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNEdge id="SequenceFlow_10cy2nu_di" bpmnElement="SequenceFlow_10cy2nu">
+ <di:waypoint xsi:type="dc:Point" x="638" y="375" />
+ <di:waypoint xsi:type="dc:Point" x="638" y="435" />
+ <di:waypoint xsi:type="dc:Point" x="-33" y="435" />
+ <di:waypoint xsi:type="dc:Point" x="-33" y="562" />
+ <di:waypoint xsi:type="dc:Point" x="-2" y="562" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="302.5" y="413" width="0" height="14" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNShape id="ScriptTask_1toiss1_di" bpmnElement="ScriptTask_1toiss1">
+ <dc:Bounds x="490" y="522" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNShape id="ServiceTask_10e6vjg_di" bpmnElement="ServiceTask_10e6vjg">
+ <dc:Bounds x="656" y="522" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNEdge id="SequenceFlow_05adaey_di" bpmnElement="SequenceFlow_05adaey">
+ <di:waypoint xsi:type="dc:Point" x="590" y="562" />
+ <di:waypoint xsi:type="dc:Point" x="656" y="562" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="623" y="540" width="0" height="14" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNEdge id="SequenceFlow_1afu5al_di" bpmnElement="SequenceFlow_1afu5al">
+ <di:waypoint xsi:type="dc:Point" x="756" y="562" />
+ <di:waypoint xsi:type="dc:Point" x="796" y="562" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="776" y="540" width="0" height="14" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNShape id="CallActivity_0pr0s2y_di" bpmnElement="CallActivity_0pr0s2y">
+ <dc:Bounds x="178" y="522" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNEdge id="SequenceFlow_1efgf9m_di" bpmnElement="SequenceFlow_1efgf9m">
+ <di:waypoint xsi:type="dc:Point" x="278" y="562" />
+ <di:waypoint xsi:type="dc:Point" x="336" y="562" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="307" y="540" width="0" height="14" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNEdge id="SequenceFlow_0ruppyi_di" bpmnElement="SequenceFlow_0ruppyi">
+ <di:waypoint xsi:type="dc:Point" x="357" y="335" />
+ <di:waypoint xsi:type="dc:Point" x="426" y="335" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="391.5" y="313" width="0" height="14" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNShape id="ScriptTask_13eovp4_di" bpmnElement="Task_0mszkkr">
+ <dc:Bounds x="257" y="295" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNShape id="ScriptTask_0ymnxuf_di" bpmnElement="Task_0p200y6">
+ <dc:Bounds x="336" y="522" width="100" height="80" />
+ </bpmndi:BPMNShape>
+ <bpmndi:BPMNEdge id="SequenceFlow_0s3vc50_di" bpmnElement="SequenceFlow_0s3vc50">
+ <di:waypoint xsi:type="dc:Point" x="436" y="562" />
+ <di:waypoint xsi:type="dc:Point" x="490" y="562" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="463" y="540" width="0" height="14" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNEdge id="SequenceFlow_0rp0tdn_di" bpmnElement="SequenceFlow_0rp0tdn">
+ <di:waypoint xsi:type="dc:Point" x="98" y="562" />
+ <di:waypoint xsi:type="dc:Point" x="178" y="562" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="138" y="540" width="0" height="14" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ </bpmndi:BPMNPlane>
+ </bpmndi:BPMNDiagram>
+</bpmn:definitions>
diff --git a/bpmn/MSOInfrastructureBPMN/src/main/resources/subprocess/DoCreateE2EServiceInstance.bpmn b/bpmn/MSOInfrastructureBPMN/src/main/resources/subprocess/DoCreateE2EServiceInstance.bpmn
index 8fe6b70d1a..0475a6a963 100644
--- a/bpmn/MSOInfrastructureBPMN/src/main/resources/subprocess/DoCreateE2EServiceInstance.bpmn
+++ b/bpmn/MSOInfrastructureBPMN/src/main/resources/subprocess/DoCreateE2EServiceInstance.bpmn
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<bpmn2:definitions xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:bpmn2="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="_MagIIMOUEeW8asg-vCEgWQ" targetNamespace="http://camunda.org/schema/1.0/bpmn" exporter="Camunda Modeler" exporterVersion="1.11.3" xsi:schemaLocation="http://www.omg.org/spec/BPMN/20100524/MODEL BPMN20.xsd">
+<bpmn2:definitions xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:bpmn2="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="_MagIIMOUEeW8asg-vCEgWQ" targetNamespace="http://camunda.org/schema/1.0/bpmn" exporter="Camunda Modeler" exporterVersion="1.10.0" xsi:schemaLocation="http://www.omg.org/spec/BPMN/20100524/MODEL BPMN20.xsd">
<bpmn2:process id="DoCreateE2EServiceInstanceV3" name="DoCreateE2EServiceInstanceV3" isExecutable="true">
<bpmn2:startEvent id="createSI_startEvent" name="Start Flow">
<bpmn2:outgoing>SequenceFlow_1qiiycn</bpmn2:outgoing>
@@ -99,7 +99,7 @@ ddsi.postProcessAAIPUT(execution)]]></bpmn2:script>
</bpmn2:scriptTask>
<bpmn2:sequenceFlow id="SequenceFlow_1qctzm0" sourceRef="Task_0uiekmn" targetRef="Task_0raqlqc" />
<bpmn2:scriptTask id="Task_0uiekmn" name="Prepare Resource Oper Status" scriptFormat="groovy">
- <bpmn2:incoming>SequenceFlow_1hbesp9</bpmn2:incoming>
+ <bpmn2:incoming>SequenceFlow_03ebqhf</bpmn2:incoming>
<bpmn2:outgoing>SequenceFlow_1qctzm0</bpmn2:outgoing>
<bpmn2:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
def ddsi = new DoCreateE2EServiceInstance()
@@ -132,12 +132,12 @@ ddsi.preInitResourcesOperStatus(execution)]]></bpmn2:script>
<bpmn2:linkEventDefinition name="Decompose_Service" />
</bpmn2:intermediateThrowEvent>
<bpmn2:intermediateThrowEvent id="IntermediateThrowEvent_1mlbhmt" name="GoTo StartService">
- <bpmn2:incoming>SequenceFlow_1gusrvp</bpmn2:incoming>
+ <bpmn2:incoming>SequenceFlow_012h7yx</bpmn2:incoming>
<bpmn2:linkEventDefinition name="StartService" />
</bpmn2:intermediateThrowEvent>
<bpmn2:scriptTask id="ScriptTask_1o01d7d" name="PostProcess&#10;Decompose&#10;Service&#10;" scriptFormat="groovy">
<bpmn2:incoming>SequenceFlow_0xjwb45</bpmn2:incoming>
- <bpmn2:outgoing>SequenceFlow_027owbf</bpmn2:outgoing>
+ <bpmn2:outgoing>SequenceFlow_012h7yx</bpmn2:outgoing>
<bpmn2:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
def dcsi= new DoCreateE2EServiceInstance()
dcsi.processDecomposition(execution)]]></bpmn2:script>
@@ -165,7 +165,6 @@ dcsi.prepareDecomposeService(execution)]]></bpmn2:script>
<bpmn2:outgoing>SequenceFlow_166w91p</bpmn2:outgoing>
<bpmn2:linkEventDefinition name="Decompose_Service" />
</bpmn2:intermediateCatchEvent>
- <bpmn2:sequenceFlow id="SequenceFlow_027owbf" sourceRef="ScriptTask_1o01d7d" targetRef="Task_0ush1g4" />
<bpmn2:sequenceFlow id="SequenceFlow_0xjwb45" sourceRef="CallActivity_0biblpc" targetRef="ScriptTask_1o01d7d" />
<bpmn2:sequenceFlow id="SequenceFlow_0qxzgvq" sourceRef="ScriptTask_1cllqk3" targetRef="CallActivity_0biblpc" />
<bpmn2:sequenceFlow id="SequenceFlow_1qiiycn" sourceRef="createSI_startEvent" targetRef="preProcessRequest_ScriptTask" />
@@ -185,11 +184,10 @@ dcsi.prepareDecomposeService(execution)]]></bpmn2:script>
<bpmn2:outgoing>SequenceFlow_1hbesp9</bpmn2:outgoing>
<bpmn2:linkEventDefinition name="StartPrepareResource" />
</bpmn2:intermediateCatchEvent>
- <bpmn2:sequenceFlow id="SequenceFlow_1hbesp9" sourceRef="IntermediateCatchEvent_05dus9b" targetRef="Task_0uiekmn" />
- <bpmn2:sequenceFlow id="SequenceFlow_1gusrvp" sourceRef="Task_0ush1g4" targetRef="IntermediateThrowEvent_1mlbhmt" />
- <bpmn2:scriptTask id="Task_0ush1g4" name="Call Homing(To be Done)" scriptFormat="groovy">
- <bpmn2:incoming>SequenceFlow_027owbf</bpmn2:incoming>
- <bpmn2:outgoing>SequenceFlow_1gusrvp</bpmn2:outgoing>
+ <bpmn2:sequenceFlow id="SequenceFlow_1hbesp9" sourceRef="IntermediateCatchEvent_05dus9b" targetRef="Task_0dqjp43" />
+ <bpmn2:scriptTask id="Task_0ush1g4" name="Call Service OOF" scriptFormat="groovy">
+ <bpmn2:incoming>SequenceFlow_01s0ef2</bpmn2:incoming>
+ <bpmn2:outgoing>SequenceFlow_03ebqhf</bpmn2:outgoing>
<bpmn2:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
def dcsi= new DoCreateE2EServiceInstance()
dcsi.doServiceHoming(execution)]]></bpmn2:script>
@@ -236,6 +234,16 @@ csi.postProcessForAddResource(execution)]]></bpmn2:script>
<bpmn2:incoming>SequenceFlow_0a6vgsu</bpmn2:incoming>
</bpmn2:endEvent>
<bpmn2:sequenceFlow id="SequenceFlow_0a6vgsu" sourceRef="ScriptTask_1y7jr4t" targetRef="EndEvent_0hzmoug" />
+ <bpmn2:sequenceFlow id="SequenceFlow_03ebqhf" sourceRef="Task_0ush1g4" targetRef="Task_0uiekmn" />
+ <bpmn2:sequenceFlow id="SequenceFlow_012h7yx" sourceRef="ScriptTask_1o01d7d" targetRef="IntermediateThrowEvent_1mlbhmt" />
+ <bpmn2:sequenceFlow id="SequenceFlow_01s0ef2" sourceRef="Task_0dqjp43" targetRef="Task_0ush1g4" />
+ <bpmn2:scriptTask id="Task_0dqjp43" name="Call Service Pre Operation" scriptFormat="groovy">
+ <bpmn2:incoming>SequenceFlow_1hbesp9</bpmn2:incoming>
+ <bpmn2:outgoing>SequenceFlow_01s0ef2</bpmn2:outgoing>
+ <bpmn2:script><![CDATA[import org.openecomp.mso.bpmn.infrastructure.scripts.*
+def dcsi= new DoCreateE2EServiceInstance()
+dcsi.doServicePreOperation(execution)]]></bpmn2:script>
+ </bpmn2:scriptTask>
</bpmn2:process>
<bpmn2:error id="Error_2" name="MSOWorkflowException" errorCode="MSOWorkflowException" />
<bpmn2:error id="Error_1" name="java.lang.Exception" errorCode="java.lang.Exception" />
@@ -344,17 +352,17 @@ csi.postProcessForAddResource(execution)]]></bpmn2:script>
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_1qctzm0_di" bpmnElement="SequenceFlow_1qctzm0">
- <di:waypoint xsi:type="dc:Point" x="296" y="300" />
- <di:waypoint xsi:type="dc:Point" x="402" y="300" />
+ <di:waypoint xsi:type="dc:Point" x="534" y="300" />
+ <di:waypoint xsi:type="dc:Point" x="604" y="300" />
<bpmndi:BPMNLabel>
- <dc:Bounds x="304" y="279" width="90" height="12" />
+ <dc:Bounds x="524" y="279" width="90" height="12" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="ScriptTask_0v81r5h_di" bpmnElement="Task_0uiekmn">
- <dc:Bounds x="196" y="260" width="100" height="80" />
+ <dc:Bounds x="434" y="260" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="ServiceTask_14tnuxf_di" bpmnElement="Task_0raqlqc">
- <dc:Bounds x="402" y="260" width="100" height="80" />
+ <dc:Bounds x="604" y="260" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="IntermediateThrowEvent_11saqvj_di" bpmnElement="IntermediateThrowEvent_0bq4fxs">
<dc:Bounds x="1315" y="-207" width="36" height="36" />
@@ -383,13 +391,6 @@ csi.postProcessForAddResource(execution)]]></bpmn2:script>
<dc:Bounds x="2" y="-21" width="88" height="24" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNShape>
- <bpmndi:BPMNEdge id="SequenceFlow_027owbf_di" bpmnElement="SequenceFlow_027owbf">
- <di:waypoint xsi:type="dc:Point" x="813" y="-39" />
- <di:waypoint xsi:type="dc:Point" x="1057" y="-39" />
- <bpmndi:BPMNLabel>
- <dc:Bounds x="890" y="-60" width="90" height="12" />
- </bpmndi:BPMNLabel>
- </bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_0xjwb45_di" bpmnElement="SequenceFlow_0xjwb45">
<di:waypoint xsi:type="dc:Point" x="578" y="-39" />
<di:waypoint xsi:type="dc:Point" x="713" y="-39" />
@@ -463,53 +464,42 @@ csi.postProcessForAddResource(execution)]]></bpmn2:script>
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_1hbesp9_di" bpmnElement="SequenceFlow_1hbesp9">
<di:waypoint xsi:type="dc:Point" x="54" y="300" />
- <di:waypoint xsi:type="dc:Point" x="196" y="300" />
- <bpmndi:BPMNLabel>
- <dc:Bounds x="125" y="279" width="0" height="12" />
- </bpmndi:BPMNLabel>
- </bpmndi:BPMNEdge>
- <bpmndi:BPMNEdge id="SequenceFlow_1gusrvp_di" bpmnElement="SequenceFlow_1gusrvp">
- <di:waypoint xsi:type="dc:Point" x="1157" y="-39" />
- <di:waypoint xsi:type="dc:Point" x="1315" y="-39" />
+ <di:waypoint xsi:type="dc:Point" x="87" y="300" />
<bpmndi:BPMNLabel>
- <dc:Bounds x="1236" y="-60" width="0" height="12" />
+ <dc:Bounds x="25.5" y="279" width="90" height="12" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="ScriptTask_0wr11dt_di" bpmnElement="Task_0ush1g4">
- <dc:Bounds x="1057" y="-79" width="100" height="80" />
+ <dc:Bounds x="277" y="260" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="CallActivity_1ojtwas_di" bpmnElement="CallActivity_1ojtwas">
- <dc:Bounds x="852" y="260" width="100" height="80" />
+ <dc:Bounds x="971" y="260" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="ScriptTask_04b21gb_di" bpmnElement="ScriptTask_04b21gb">
- <dc:Bounds x="629" y="260" width="100" height="80" />
+ <dc:Bounds x="799" y="260" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="ScriptTask_1y7jr4t_di" bpmnElement="ScriptTask_1y7jr4t">
- <dc:Bounds x="1068" y="260" width="100" height="80" />
+ <dc:Bounds x="1145" y="260" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_13xfsff_di" bpmnElement="SequenceFlow_13xfsff">
- <di:waypoint xsi:type="dc:Point" x="502" y="300" />
- <di:waypoint xsi:type="dc:Point" x="629" y="300" />
+ <di:waypoint xsi:type="dc:Point" x="704" y="300" />
+ <di:waypoint xsi:type="dc:Point" x="799" y="300" />
<bpmndi:BPMNLabel>
- <dc:Bounds x="565.5" y="279" width="0" height="12" />
+ <dc:Bounds x="706.5" y="279" width="90" height="12" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_0bf6bzp_di" bpmnElement="SequenceFlow_0bf6bzp">
- <di:waypoint xsi:type="dc:Point" x="729" y="300" />
- <di:waypoint xsi:type="dc:Point" x="789" y="300" />
- <di:waypoint xsi:type="dc:Point" x="789" y="300" />
- <di:waypoint xsi:type="dc:Point" x="852" y="300" />
+ <di:waypoint xsi:type="dc:Point" x="899" y="300" />
+ <di:waypoint xsi:type="dc:Point" x="971" y="300" />
<bpmndi:BPMNLabel>
- <dc:Bounds x="804" y="294" width="0" height="12" />
+ <dc:Bounds x="890" y="279" width="90" height="12" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_0d0c20n_di" bpmnElement="SequenceFlow_0d0c20n">
- <di:waypoint xsi:type="dc:Point" x="952" y="300" />
- <di:waypoint xsi:type="dc:Point" x="1009" y="300" />
- <di:waypoint xsi:type="dc:Point" x="1009" y="300" />
- <di:waypoint xsi:type="dc:Point" x="1068" y="300" />
+ <di:waypoint xsi:type="dc:Point" x="1071" y="300" />
+ <di:waypoint xsi:type="dc:Point" x="1145" y="300" />
<bpmndi:BPMNLabel>
- <dc:Bounds x="1024" y="294" width="0" height="12" />
+ <dc:Bounds x="1063" y="279" width="90" height="12" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="EndEvent_0hzmoug_di" bpmnElement="EndEvent_0hzmoug">
@@ -519,14 +509,36 @@ csi.postProcessForAddResource(execution)]]></bpmn2:script>
</bpmndi:BPMNLabel>
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_0a6vgsu_di" bpmnElement="SequenceFlow_0a6vgsu">
- <di:waypoint xsi:type="dc:Point" x="1168" y="300" />
- <di:waypoint xsi:type="dc:Point" x="1242" y="300" />
- <di:waypoint xsi:type="dc:Point" x="1242" y="300" />
+ <di:waypoint xsi:type="dc:Point" x="1245" y="300" />
<di:waypoint xsi:type="dc:Point" x="1315" y="300" />
<bpmndi:BPMNLabel>
- <dc:Bounds x="1257" y="294" width="0" height="12" />
+ <dc:Bounds x="1235" y="279" width="90" height="12" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNEdge>
+ <bpmndi:BPMNEdge id="SequenceFlow_03ebqhf_di" bpmnElement="SequenceFlow_03ebqhf">
+ <di:waypoint xsi:type="dc:Point" x="377" y="300" />
+ <di:waypoint xsi:type="dc:Point" x="434" y="300" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="405.5" y="278" width="0" height="14" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNEdge id="SequenceFlow_012h7yx_di" bpmnElement="SequenceFlow_012h7yx">
+ <di:waypoint xsi:type="dc:Point" x="813" y="-39" />
+ <di:waypoint xsi:type="dc:Point" x="1315" y="-39" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="1064" y="-61" width="0" height="14" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNEdge id="SequenceFlow_01s0ef2_di" bpmnElement="SequenceFlow_01s0ef2">
+ <di:waypoint xsi:type="dc:Point" x="187" y="300" />
+ <di:waypoint xsi:type="dc:Point" x="277" y="300" />
+ <bpmndi:BPMNLabel>
+ <dc:Bounds x="232" y="278" width="0" height="14" />
+ </bpmndi:BPMNLabel>
+ </bpmndi:BPMNEdge>
+ <bpmndi:BPMNShape id="ScriptTask_1uhlqf5_di" bpmnElement="Task_0dqjp43">
+ <dc:Bounds x="87" y="260" width="100" height="80" />
+ </bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn2:definitions>
diff --git a/bpmn/pom.xml b/bpmn/pom.xml
index 2989a1892c..d7e26f9d85 100644
--- a/bpmn/pom.xml
+++ b/bpmn/pom.xml
@@ -105,6 +105,7 @@
<artifactId>maven-surefire-plugin</artifactId>
<version>2.19.1</version>
<configuration>
+ <redirectTestOutputToFile>true</redirectTestOutputToFile>
<testFailureIgnore>false</testFailureIgnore>
<argLine>${surefireArgLine} -Xss1m</argLine>
<forkCount>1</forkCount>
diff --git a/docs/Building_SO.rst b/docs/Building_SO.rst
index 29ff6bd0e8..2e411d5e11 100644
--- a/docs/Building_SO.rst
+++ b/docs/Building_SO.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. Copyright 2017 Huawei Technologies Co., Ltd.
+.. Copyright 2018 Huawei Technologies Co., Ltd.
Building SO
============