From 0e4bd7f93b9bfadcc2bb6dfaeae7bb5ee00c194b Mon Sep 17 00:00:00 2001
From: Avishay Traeger
Date: Sun, 11 Aug 2013 13:22:15 +0300
Subject: [PATCH] Allow swap_volume to be called by Cinder

This allows Cinder to call swap_volume as part of a volume migration.
Cinder creates the new volume and calls Nova to perform the swap. When
Nova finishes, it calls Cinder to complete the migration, and Cinder
updates the volume metadata so that the volume ID remains unchanged.
Thus, if Cinder detects that it initiated the migration itself, it
returns the original volume ID for Nova to keep using. Otherwise, it
returns the new volume's ID for Nova to use, as it did before.

Change-Id: If352f2e11701148a5aeccaf7fd6379528fa8be31
---
 nova/compute/manager.py                | 18 +++++++++++++++++-
 nova/tests/compute/test_compute_mgr.py |  8 ++++++++
 nova/volume/cinder.py                  |  5 +++++
 3 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 8a907b2b47..ae5e6b3a0b 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -3582,6 +3582,10 @@ class ComputeManager(manager.SchedulerDependentManager):
                               context=context,
                               instance=instance)
                 self.volume_api.unreserve_volume(context, new_volume_id)
+                self.volume_api.migrate_volume_completion(context,
+                                                          old_volume_id,
+                                                          new_volume_id,
+                                                          error=True)
 
         old_cinfo = jsonutils.loads(bdm['connection_info'])
         if old_cinfo and 'serial' not in old_cinfo:
@@ -3601,6 +3605,10 @@ class ComputeManager(manager.SchedulerDependentManager):
                 self.volume_api.terminate_connection(context,
                                                      new_volume_id,
                                                      connector)
+                self.volume_api.migrate_volume_completion(context,
+                                                          old_volume_id,
+                                                          new_volume_id,
+                                                          error=True)
         self.volume_api.attach(context,
                                new_volume_id,
                                instance['uuid'],
@@ -3609,6 +3617,14 @@ class ComputeManager(manager.SchedulerDependentManager):
         volume = self.volume_api.get(context, old_volume_id)
         self.volume_api.terminate_connection(context, old_volume_id, connector)
         self.volume_api.detach(context.elevated(), old_volume_id)
+
+        # If Cinder initiated the swap, it will keep the original ID
+        comp_ret = self.volume_api.migrate_volume_completion(context,
+                                                             old_volume_id,
+                                                             new_volume_id,
+                                                             error=False)
+        save_volume_id = comp_ret['save_volume_id']
+
         # Update bdm
         values = {
             'instance_uuid': instance['uuid'],
@@ -3617,7 +3633,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             'delete_on_termination': False,
             'virtual_name': None,
             'snapshot_id': None,
-            'volume_id': new_volume_id,
+            'volume_id': save_volume_id,
             'volume_size': None,
             'no_device': None}
         self.conductor_api.block_device_mapping_update_or_create(context,
diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py
index 653c2b82c8..0be3c361e0 100644
--- a/nova/tests/compute/test_compute_mgr.py
+++ b/nova/tests/compute/test_compute_mgr.py
@@ -607,6 +607,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
             self.assertTrue(uuidutils.is_uuid_like(volume_id))
             volumes[volume_id]['status'] = 'available'
 
+        def fake_vol_migrate_volume_completion(context, old_volume_id,
+                                               new_volume_id, error=False):
+            self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
+            self.assertTrue(uuidutils.is_uuid_like(new_volume_id))
+            return {'save_volume_id': new_volume_id}
+
         def fake_func_exc(*args, **kwargs):
             raise AttributeError  # Random exception
 
@@ -626,6 +632,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                        lambda x: {})
         self.stubs.Set(self.compute.driver, 'swap_volume',
                        lambda w, x, y, z: None)
+        self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion',
+                       fake_vol_migrate_volume_completion)
         self.stubs.Set(self.compute.conductor_api,
                        'block_device_mapping_update_or_create',
                        lambda x, y: None)
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 8d6bc8454a..328831b95c 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -278,6 +278,11 @@ class API(base.Base):
         return cinderclient(context).volumes.terminate_connection(volume_id,
                                                                    connector)
 
+    def migrate_volume_completion(self, context, old_volume_id, new_volume_id,
+                                  error=False):
+        return cinderclient(context).volumes.migrate_volume_completion(
+            old_volume_id, new_volume_id, error)
+
     def create(self, context, size, name, description, snapshot=None,
                image_id=None, volume_type=None, metadata=None,
                availability_zone=None):
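
For illustration, here is a standalone sketch of the bookkeeping this change adds
around swap_volume. FakeVolumeAPI and volume_id_to_save() are invented stand-ins,
not Nova or Cinder code; the only behaviour assumed from the patch is that
migrate_volume_completion() returns a dict whose 'save_volume_id' is the original
volume ID when Cinder initiated the migration, and the new volume's ID otherwise.

# Standalone sketch; FakeVolumeAPI stands in for nova.volume.cinder.API and
# only models the 'save_volume_id' contract described in the commit message.


class FakeVolumeAPI(object):
    def __init__(self, cinder_initiated):
        # Whether the volume migration was started by Cinder itself.
        self.cinder_initiated = cinder_initiated

    def migrate_volume_completion(self, context, old_volume_id,
                                  new_volume_id, error=False):
        # Cinder keeps the original volume ID for migrations it initiated,
        # so the instance's block device mapping does not need a new ID.
        if self.cinder_initiated and not error:
            return {'save_volume_id': old_volume_id}
        return {'save_volume_id': new_volume_id}


def volume_id_to_save(volume_api, context, old_volume_id, new_volume_id):
    # Mirrors the end of ComputeManager.swap_volume(): whichever ID Cinder
    # returns is the one written into the block device mapping.
    comp_ret = volume_api.migrate_volume_completion(context,
                                                    old_volume_id,
                                                    new_volume_id,
                                                    error=False)
    return comp_ret['save_volume_id']


if __name__ == '__main__':
    ctxt = object()  # placeholder for a request context
    old, new = 'vol-old', 'vol-new'
    # Cinder-initiated migration: the BDM keeps the old volume ID.
    print(volume_id_to_save(FakeVolumeAPI(True), ctxt, old, new))
    # Swap requested directly through Nova: the BDM gets the new ID.
    print(volume_id_to_save(FakeVolumeAPI(False), ctxt, old, new))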