Diffstat (limited to 'aria/multivim-plugin/cinder_plugin')
-rw-r--r-- | aria/multivim-plugin/cinder_plugin/__init__.py          |  14
-rw-r--r-- | aria/multivim-plugin/cinder_plugin/tests/__init__.py    |  14
-rw-r--r-- | aria/multivim-plugin/cinder_plugin/tests/test_volume.py | 342
-rw-r--r-- | aria/multivim-plugin/cinder_plugin/volume.py            | 125
4 files changed, 495 insertions, 0 deletions
diff --git a/aria/multivim-plugin/cinder_plugin/__init__.py b/aria/multivim-plugin/cinder_plugin/__init__.py
new file mode 100644
index 0000000000..a9dfcc4473
--- /dev/null
+++ b/aria/multivim-plugin/cinder_plugin/__init__.py
@@ -0,0 +1,14 @@
+#########
+# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+#    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    * See the License for the specific language governing permissions and
+#    * limitations under the License.
diff --git a/aria/multivim-plugin/cinder_plugin/tests/__init__.py b/aria/multivim-plugin/cinder_plugin/tests/__init__.py
new file mode 100644
index 0000000000..a9dfcc4473
--- /dev/null
+++ b/aria/multivim-plugin/cinder_plugin/tests/__init__.py
@@ -0,0 +1,14 @@
+#########
+# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+#    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    * See the License for the specific language governing permissions and
+#    * limitations under the License.
diff --git a/aria/multivim-plugin/cinder_plugin/tests/test_volume.py b/aria/multivim-plugin/cinder_plugin/tests/test_volume.py
new file mode 100644
index 0000000000..0ee85bc334
--- /dev/null
+++ b/aria/multivim-plugin/cinder_plugin/tests/test_volume.py
@@ -0,0 +1,342 @@
+#########
+# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+#    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    * See the License for the specific language governing permissions and
+#    * limitations under the License.
+
+import mock
+import unittest
+
+from cloudify import mocks as cfy_mocks
+from cloudify import exceptions as cfy_exc
+from cloudify.state import current_ctx
+from cinder_plugin import volume
+from nova_plugin import server
+from openstack_plugin_common import (OPENSTACK_ID_PROPERTY,
+                                     OPENSTACK_TYPE_PROPERTY,
+                                     OPENSTACK_NAME_PROPERTY)
+
+
+class TestCinderVolume(unittest.TestCase):
+
+    def _mock(self, **kwargs):
+        ctx = cfy_mocks.MockCloudifyContext(**kwargs)
+        current_ctx.set(ctx)
+        return ctx
+
+    def tearDown(self):
+        current_ctx.clear()
+
+    def test_create_new(self):
+        volume_name = 'fake volume name'
+        volume_description = 'fake volume'
+        volume_id = '00000000-0000-0000-0000-000000000000'
+        volume_size = 10
+
+        volume_properties = {
+            'volume': {
+                'size': volume_size,
+                'description': volume_description
+            },
+            'use_external_resource': False,
+            'device_name': '/dev/fake',
+            'resource_id': volume_name,
+        }
+
+        creating_volume_m = mock.Mock()
+        creating_volume_m.id = volume_id
+        creating_volume_m.status = volume.VOLUME_STATUS_CREATING
+        available_volume_m = mock.Mock()
+        available_volume_m.id = volume_id
+        available_volume_m.status = volume.VOLUME_STATUS_AVAILABLE
+        cinder_client_m = mock.Mock()
+        cinder_client_m.volumes = mock.Mock()
+        cinder_client_m.volumes.create = mock.Mock(
+            return_value=creating_volume_m)
+        cinder_client_m.volumes.get = mock.Mock(
+            return_value=available_volume_m)
+        ctx_m = self._mock(node_id='a', properties=volume_properties)
+
+        volume.create(cinder_client=cinder_client_m, args={}, ctx=ctx_m,
+                      status_attempts=10, status_timeout=2)
+
+        cinder_client_m.volumes.create.assert_called_once_with(
+            size=volume_size,
+            name=volume_name,
+            description=volume_description)
+        cinder_client_m.volumes.get.assert_called_once_with(volume_id)
+        self.assertEqual(
+            volume_id,
+            ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY])
+        self.assertEqual(
+            volume.VOLUME_OPENSTACK_TYPE,
+            ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY])
+
+    def test_create_use_existing(self):
+        volume_id = '00000000-0000-0000-0000-000000000000'
+
+        volume_properties = {
+            'use_external_resource': True,
+            'device_name': '/dev/fake',
+            'resource_id': volume_id,
+        }
+        existing_volume_m = mock.Mock()
+        existing_volume_m.id = volume_id
+        existing_volume_m.status = volume.VOLUME_STATUS_AVAILABLE
+        cinder_client_m = mock.Mock()
+        cinder_client_m.volumes = mock.Mock()
+        cinder_client_m.volumes.create = mock.Mock()
+        cinder_client_m.cosmo_get_if_exists = mock.Mock(
+            return_value=existing_volume_m)
+        cinder_client_m.get_id_from_resource = mock.Mock(
+            return_value=volume_id)
+        ctx_m = self._mock(node_id='a', properties=volume_properties)
+
+        volume.create(cinder_client=cinder_client_m, args={}, ctx=ctx_m,
+                      status_attempts=10, status_timeout=2)
+
+        self.assertFalse(cinder_client_m.volumes.create.called)
+        self.assertEqual(
+            volume_id,
+            ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY])
+        self.assertEqual(
+            volume.VOLUME_OPENSTACK_TYPE,
+            ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY])
+
+    def test_delete(self):
+        volume_id = '00000000-0000-0000-0000-000000000000'
+        volume_name = 'test-volume'
+
+        volume_properties = {
+            'use_external_resource': False,
+        }
+
+        cinder_client_m = mock.Mock()
+        cinder_client_m.cosmo_delete_resource = mock.Mock()
+
+        ctx_m = self._mock(node_id='a', properties=volume_properties)
+        ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = volume_id
+        ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
+            volume.VOLUME_OPENSTACK_TYPE
+        ctx_m.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \
+            volume_name
+
+        volume.delete(cinder_client=cinder_client_m, ctx=ctx_m)
+
+        cinder_client_m.cosmo_delete_resource.assert_called_once_with(
+            volume.VOLUME_OPENSTACK_TYPE, volume_id)
+        self.assertTrue(
+            OPENSTACK_ID_PROPERTY not in ctx_m.instance.runtime_properties)
+        self.assertTrue(OPENSTACK_TYPE_PROPERTY
+                        not in ctx_m.instance.runtime_properties)
+        self.assertTrue(OPENSTACK_NAME_PROPERTY
+                        not in ctx_m.instance.runtime_properties)
+
+    @mock.patch('openstack_plugin_common.NovaClientWithSugar')
+    @mock.patch('openstack_plugin_common.CinderClientWithSugar')
+    @mock.patch.object(volume, 'wait_until_status', return_value=(None, True))
+    def test_attach(self, wait_until_status_m, cinder_m, nova_m):
+        volume_id = '00000000-0000-0000-0000-000000000000'
+        server_id = '11111111-1111-1111-1111-111111111111'
+        device_name = '/dev/fake'
+
+        volume_ctx = cfy_mocks.MockContext({
+            'node': cfy_mocks.MockContext({
+                'properties': {volume.DEVICE_NAME_PROPERTY: device_name}
+            }),
+            'instance': cfy_mocks.MockContext({
+                'runtime_properties': {
+                    OPENSTACK_ID_PROPERTY: volume_id,
+                }
+            })
+        })
+        server_ctx = cfy_mocks.MockContext({
+            'node': cfy_mocks.MockContext({
+                'properties': {}
+            }),
+            'instance': cfy_mocks.MockContext({
+                'runtime_properties': {
+                    server.OPENSTACK_ID_PROPERTY: server_id
+                }
+            })
+        })
+
+        ctx_m = self._mock(node_id='a',
+                           target=server_ctx,
+                           source=volume_ctx)
+
+        nova_instance = nova_m.return_value
+        cinder_instance = cinder_m.return_value
+
+        server.attach_volume(ctx=ctx_m, status_attempts=10,
+                             status_timeout=2)
+
+        nova_instance.volumes.create_server_volume.assert_called_once_with(
+            server_id, volume_id, device_name)
+        wait_until_status_m.assert_called_once_with(
+            cinder_client=cinder_instance,
+            volume_id=volume_id,
+            status=volume.VOLUME_STATUS_IN_USE,
+            num_tries=10,
+            timeout=2,
+        )
+
+    @mock.patch('openstack_plugin_common.NovaClientWithSugar')
+    @mock.patch('openstack_plugin_common.CinderClientWithSugar')
+    def _test_cleanup__after_attach_fails(
+            self, expected_err_cls, expect_cleanup,
+            wait_until_status_m, cinder_m, nova_m):
+        volume_id = '00000000-0000-0000-0000-000000000000'
+        server_id = '11111111-1111-1111-1111-111111111111'
+        attachment_id = '22222222-2222-2222-2222-222222222222'
+        device_name = '/dev/fake'
+
+        attachment = {'id': attachment_id,
+                      'server_id': server_id,
+                      'volume_id': volume_id}
+
+        volume_ctx = cfy_mocks.MockContext({
+            'node': cfy_mocks.MockContext({
+                'properties': {volume.DEVICE_NAME_PROPERTY: device_name}
+            }),
+            'instance': cfy_mocks.MockContext({
+                'runtime_properties': {
+                    OPENSTACK_ID_PROPERTY: volume_id,
+                }
+            })
+        })
+        server_ctx = cfy_mocks.MockContext({
+            'node': cfy_mocks.MockContext({
+                'properties': {}
+            }),
+            'instance': cfy_mocks.MockContext({
+                'runtime_properties': {
+                    server.OPENSTACK_ID_PROPERTY: server_id
+                }
+            })
+        })
+
+        ctx_m = self._mock(node_id='a',
+                           target=server_ctx,
+                           source=volume_ctx)
+
+        attached_volume = mock.Mock(id=volume_id,
+                                    status=volume.VOLUME_STATUS_IN_USE,
+                                    attachments=[attachment])
+        nova_instance = nova_m.return_value
+        cinder_instance = cinder_m.return_value
+        cinder_instance.volumes.get.return_value = attached_volume
+
+        with self.assertRaises(expected_err_cls):
+            server.attach_volume(ctx=ctx_m, status_attempts=10,
+                                 status_timeout=2)
+
+        nova_instance.volumes.create_server_volume.assert_called_once_with(
+            server_id, volume_id, device_name)
+        volume.wait_until_status.assert_any_call(
+            cinder_client=cinder_instance,
+            volume_id=volume_id,
+            status=volume.VOLUME_STATUS_IN_USE,
+            num_tries=10,
+            timeout=2,
+        )
+        if expect_cleanup:
+            nova_instance.volumes.delete_server_volume.assert_called_once_with(
+                server_id, attachment_id)
+            self.assertEqual(2, volume.wait_until_status.call_count)
+            volume.wait_until_status.assert_called_with(
+                cinder_client=cinder_instance,
+                volume_id=volume_id,
+                status=volume.VOLUME_STATUS_AVAILABLE,
+                num_tries=10,
+                timeout=2)
+
+    def test_cleanup_after_waituntilstatus_throws_recoverable_error(self):
+        err = cfy_exc.RecoverableError('Some recoverable error')
+        with mock.patch.object(volume, 'wait_until_status',
+                               side_effect=[err, (None, True)]) as wait_mock:
+            self._test_cleanup__after_attach_fails(type(err), True, wait_mock)
+
+    def test_cleanup_after_waituntilstatus_throws_any_not_nonrecov_error(self):
+        class ArbitraryNonRecoverableException(Exception):
+            pass
+        err = ArbitraryNonRecoverableException('An exception')
+        with mock.patch.object(volume, 'wait_until_status',
+                               side_effect=[err, (None, True)]) as wait_mock:
+            self._test_cleanup__after_attach_fails(type(err), True, wait_mock)
+
+    def test_cleanup_after_waituntilstatus_lets_nonrecov_errors_pass(self):
+        err = cfy_exc.NonRecoverableError('Some non recoverable error')
+        with mock.patch.object(volume, 'wait_until_status',
+                               side_effect=[err, (None, True)]) as wait_mock:
+            self._test_cleanup__after_attach_fails(type(err), False, wait_mock)
+
+    @mock.patch.object(volume, 'wait_until_status', return_value=(None, False))
+    def test_cleanup_after_waituntilstatus_times_out(self, wait_mock):
+        self._test_cleanup__after_attach_fails(cfy_exc.RecoverableError, True,
+                                               wait_mock)
+
+    @mock.patch('openstack_plugin_common.NovaClientWithSugar')
+    @mock.patch('openstack_plugin_common.CinderClientWithSugar')
+    @mock.patch.object(volume, 'wait_until_status', return_value=(None, True))
+    def test_detach(self, wait_until_status_m, cinder_m, nova_m):
+        volume_id = '00000000-0000-0000-0000-000000000000'
+        server_id = '11111111-1111-1111-1111-111111111111'
+        attachment_id = '22222222-2222-2222-2222-222222222222'
+
+        attachment = {'id': attachment_id,
+                      'server_id': server_id,
+                      'volume_id': volume_id}
+
+        volume_ctx = cfy_mocks.MockContext({
+            'node': cfy_mocks.MockContext({
+                'properties': {}
+            }),
+            'instance': cfy_mocks.MockContext({
+                'runtime_properties': {
+                    OPENSTACK_ID_PROPERTY: volume_id,
+                }
+            })
+        })
+        server_ctx = cfy_mocks.MockContext({
+            'node': cfy_mocks.MockContext({
+                'properties': {}
+            }),
+            'instance': cfy_mocks.MockContext({
+                'runtime_properties': {
+                    server.OPENSTACK_ID_PROPERTY: server_id
+                }
+            })
+        })
+
+        ctx_m = self._mock(node_id='a',
+                           target=server_ctx,
+                           source=volume_ctx)
+
+        attached_volume = mock.Mock(id=volume_id,
+                                    status=volume.VOLUME_STATUS_IN_USE,
+                                    attachments=[attachment])
+        nova_instance = nova_m.return_value
+        cinder_instance = cinder_m.return_value
+        cinder_instance.volumes.get.return_value = attached_volume
+
+        server.detach_volume(ctx=ctx_m, status_attempts=10, status_timeout=2)
+
+        nova_instance.volumes.delete_server_volume.assert_called_once_with(
+            server_id, attachment_id)
+        volume.wait_until_status.assert_called_once_with(
+            cinder_client=cinder_instance,
+            volume_id=volume_id,
+            status=volume.VOLUME_STATUS_AVAILABLE,
+            num_tries=10,
+            timeout=2,
+        )
diff --git a/aria/multivim-plugin/cinder_plugin/volume.py b/aria/multivim-plugin/cinder_plugin/volume.py
new file mode 100644
index 0000000000..168681b943
--- /dev/null
+++ b/aria/multivim-plugin/cinder_plugin/volume.py
@@ -0,0 +1,125 @@
+#########
+# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+#    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    * See the License for the specific language governing permissions and
+#    * limitations under the License.
+
+import time
+
+from cloudify import ctx
+from cloudify.decorators import operation
+from cloudify import exceptions as cfy_exc
+
+from openstack_plugin_common import (delete_resource_and_runtime_properties,
+                                     with_cinder_client,
+                                     get_resource_id,
+                                     transform_resource_name,
+                                     use_external_resource,
+                                     validate_resource,
+                                     COMMON_RUNTIME_PROPERTIES_KEYS,
+                                     OPENSTACK_AZ_PROPERTY,
+                                     OPENSTACK_ID_PROPERTY,
+                                     OPENSTACK_TYPE_PROPERTY,
+                                     OPENSTACK_NAME_PROPERTY)
+from glance_plugin.image import handle_image_from_relationship
+
+VOLUME_STATUS_CREATING = 'creating'
+VOLUME_STATUS_DELETING = 'deleting'
+VOLUME_STATUS_AVAILABLE = 'available'
+VOLUME_STATUS_IN_USE = 'in-use'
+VOLUME_STATUS_ERROR = 'error'
+VOLUME_STATUS_ERROR_DELETING = 'error_deleting'
+VOLUME_ERROR_STATUSES = (VOLUME_STATUS_ERROR, VOLUME_STATUS_ERROR_DELETING)
+
+# Note: The 'device_name' property should actually be a property of the
+# relationship between a server and a volume; It'll move to that
+# relationship type once relationship properties are better supported.
+DEVICE_NAME_PROPERTY = 'device_name'
+
+VOLUME_OPENSTACK_TYPE = 'volume'
+
+RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS
+
+
+@operation
+@with_cinder_client
+def create(cinder_client, status_attempts, status_timeout, args, **kwargs):
+
+    if use_external_resource(ctx, cinder_client, VOLUME_OPENSTACK_TYPE,
+                             'name'):
+        return
+
+    name = get_resource_id(ctx, VOLUME_OPENSTACK_TYPE)
+    volume_dict = {'name': name}
+    volume_dict.update(ctx.node.properties['volume'], **args)
+    handle_image_from_relationship(volume_dict, 'imageRef', ctx)
+    volume_dict['name'] = transform_resource_name(
+        ctx, volume_dict['name'])
+
+    v = cinder_client.volumes.create(**volume_dict)
+
+    ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = v.id
+    ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \
+        VOLUME_OPENSTACK_TYPE
+    ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \
+        volume_dict['name']
+    wait_until_status(cinder_client=cinder_client,
+                      volume_id=v.id,
+                      status=VOLUME_STATUS_AVAILABLE,
+                      num_tries=status_attempts,
+                      timeout=status_timeout,
+                      )
+    ctx.instance.runtime_properties[OPENSTACK_AZ_PROPERTY] = \
+        v.availability_zone
+
+
+@operation
+@with_cinder_client
+def delete(cinder_client, **kwargs):
+    delete_resource_and_runtime_properties(ctx, cinder_client,
+                                           RUNTIME_PROPERTIES_KEYS)
+
+
+@with_cinder_client
+def wait_until_status(cinder_client, volume_id, status, num_tries,
+                      timeout):
+    for _ in range(num_tries):
+        volume = cinder_client.volumes.get(volume_id)
+
+        if volume.status in VOLUME_ERROR_STATUSES:
+            raise cfy_exc.NonRecoverableError(
+                "Volume {0} is in error state".format(volume_id))
+
+        if volume.status == status:
+            return volume, True
+        time.sleep(timeout)
+
+    ctx.logger.warning("Volume {0} current state: '{1}', "
+                       "expected state: '{2}'".format(volume_id,
+                                                      volume.status,
+                                                      status))
+    return volume, False
+
+
+@with_cinder_client
+def get_attachment(cinder_client, volume_id, server_id):
+    volume = cinder_client.volumes.get(volume_id)
+    for attachment in volume.attachments:
+        if attachment['server_id'] == server_id:
+            return attachment
+
+
+@operation
+@with_cinder_client
+def creation_validation(cinder_client, **kwargs):
+    validate_resource(ctx, cinder_client, VOLUME_OPENSTACK_TYPE,
+                      'name')
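
Reviewer note (illustrative, not part of the change): the tests above assert on the (volume, success_flag) tuple returned by volume.wait_until_status, and the attach/detach paths branch on that flag. The standalone sketch below reproduces that polling contract with a fake client so the expected return values are easy to see; FakeVolumes and poll_status are hypothetical names used only for this example and do not appear in the commit.

import time


class FakeVolumes(object):
    """Hypothetical stand-in for cinder_client.volumes; each get() call
    returns an object with a .status attribute."""

    def __init__(self, statuses):
        self._statuses = list(statuses)

    def get(self, volume_id):
        status = self._statuses.pop(0)
        return type('Volume', (object,), {'id': volume_id, 'status': status})()


def poll_status(volumes, volume_id, wanted, num_tries, timeout):
    """Same contract as volume.wait_until_status: (volume, True) once the
    wanted status is seen, (volume, False) after num_tries polls."""
    vol = None
    for _ in range(num_tries):
        vol = volumes.get(volume_id)
        if vol.status == wanted:
            return vol, True
        time.sleep(timeout)
    return vol, False


# 'available' arrives on the third poll, so the success flag is True.
print(poll_status(FakeVolumes(['creating', 'creating', 'available']),
                  'vol-1', 'available', num_tries=5, timeout=0))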