Allow swap_volume to be called by Cinder
This allows Cinder to call swap_volume as part of a volume migration. Cinder creates the new volume and calls Nova to perform the swap; when Nova finishes, it calls Cinder back to complete the migration. If Cinder detects that it initiated the migration itself, it updates the volume's metadata so that the volume ID remains unchanged, and returns the original volume ID for Nova to keep using. Otherwise, it returns the new volume's ID, which Nova records just as it did before.

Change-Id: If352f2e11701148a5aeccaf7fd6379528fa8be31
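For orientation, here is a minimal Python sketch of the contract this change relies on. The helper name finish_swap is illustrative, not Nova's API; the real logic lives in ComputeManager and nova.volume.cinder.API in the hunks below.

    def finish_swap(volume_api, context, old_volume_id, new_volume_id):
        # Ask Cinder to complete the migration. Cinder replies with the
        # volume ID Nova should keep: the original ID if Cinder initiated
        # the migration, otherwise the new volume's ID.
        comp_ret = volume_api.migrate_volume_completion(
            context, old_volume_id, new_volume_id, error=False)
        return comp_ret['save_volume_id']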
@@ -3582,6 +3582,10 @@ class ComputeManager(manager.SchedulerDependentManager):
                               context=context,
                               instance=instance)
                 self.volume_api.unreserve_volume(context, new_volume_id)
+                self.volume_api.migrate_volume_completion(context,
+                                                          old_volume_id,
+                                                          new_volume_id,
+                                                          error=True)
 
         old_cinfo = jsonutils.loads(bdm['connection_info'])
         if old_cinfo and 'serial' not in old_cinfo:
@@ -3601,6 +3605,10 @@ class ComputeManager(manager.SchedulerDependentManager):
                 self.volume_api.terminate_connection(context,
                                                      new_volume_id,
                                                      connector)
+                self.volume_api.migrate_volume_completion(context,
+                                                          old_volume_id,
+                                                          new_volume_id,
+                                                          error=True)
                 self.volume_api.attach(context,
                                        new_volume_id,
                                        instance['uuid'],
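Both hunks above sit in swap_volume's failure handling: if the hypervisor-level swap fails, Nova now reports the failure back to Cinder so the half-finished migration can be rolled back. A simplified sketch of the pattern (the surrounding method is condensed, and do_swap is an illustrative name, not the real method):

    def do_swap(self, context, instance, mountpoint, connector,
                old_cinfo, new_cinfo, old_volume_id, new_volume_id):
        try:
            self.driver.swap_volume(old_cinfo, new_cinfo,
                                    instance, mountpoint)
        except Exception:
            # Release the half-attached new volume and tell Cinder the
            # migration failed (error=True) so it can roll back.
            self.volume_api.terminate_connection(context, new_volume_id,
                                                 connector)
            self.volume_api.migrate_volume_completion(context,
                                                      old_volume_id,
                                                      new_volume_id,
                                                      error=True)
            raise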
@@ -3609,6 +3617,14 @@ class ComputeManager(manager.SchedulerDependentManager):
         volume = self.volume_api.get(context, old_volume_id)
         self.volume_api.terminate_connection(context, old_volume_id, connector)
         self.volume_api.detach(context.elevated(), old_volume_id)
+
+        # If Cinder initiated the swap, it will keep the original ID
+        comp_ret = self.volume_api.migrate_volume_completion(context,
+                                                             old_volume_id,
+                                                             new_volume_id,
+                                                             error=False)
+        save_volume_id = comp_ret['save_volume_id']
+
         # Update bdm
         values = {
             'instance_uuid': instance['uuid'],
@@ -3617,7 +3633,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             'delete_on_termination': False,
             'virtual_name': None,
             'snapshot_id': None,
-            'volume_id': new_volume_id,
+            'volume_id': save_volume_id,
             'volume_size': None,
             'no_device': None}
         self.conductor_api.block_device_mapping_update_or_create(context,
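The net effect of the last two hunks: the block device mapping records whichever ID Cinder said to keep, rather than unconditionally recording the new one. Schematically (an illustrative helper, not Nova code):

    def expected_save_volume_id(cinder_initiated, old_volume_id,
                                new_volume_id):
        # Cinder-initiated migration: Cinder retargets the original volume,
        # so the old ID survives and the guest-visible volume ID is stable.
        # Nova-initiated swap: the new ID survives, matching old behavior.
        return old_volume_id if cinder_initiated else new_volume_id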
@@ -607,6 +607,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
             self.assertTrue(uuidutils.is_uuid_like(volume_id))
             volumes[volume_id]['status'] = 'available'
 
+        def fake_vol_migrate_volume_completion(context, old_volume_id,
+                                               new_volume_id, error=False):
+            self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
+            self.assertTrue(uuidutils.is_uuid_like(new_volume_id))
+            return {'save_volume_id': new_volume_id}
+
         def fake_func_exc(*args, **kwargs):
             raise AttributeError  # Random exception
@@ -626,6 +632,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                        lambda x: {})
         self.stubs.Set(self.compute.driver, 'swap_volume',
                        lambda w, x, y, z: None)
+        self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion',
+                       fake_vol_migrate_volume_completion)
         self.stubs.Set(self.compute.conductor_api,
                        'block_device_mapping_update_or_create',
                        lambda x, y: None)
@@ -278,6 +278,11 @@ class API(base.Base):
         return cinderclient(context).volumes.terminate_connection(volume_id,
                                                                    connector)
 
+    def migrate_volume_completion(self, context, old_volume_id, new_volume_id,
+                                  error=False):
+        return cinderclient(context).volumes.migrate_volume_completion(
+            old_volume_id, new_volume_id, error)
+
     def create(self, context, size, name, description, snapshot=None,
                image_id=None, volume_type=None, metadata=None,
                availability_zone=None):
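The new wrapper simply proxies to python-cinderclient. A minimal fake that honors the same contract, handy when stubbing the volume API in tests (illustrative, not part of this change):

    class FakeCinderVolumes(object):
        """Stand-in for cinderclient's volumes manager."""

        def migrate_volume_completion(self, old_volume_id, new_volume_id,
                                      error):
            # Pretend Nova initiated the swap, so the new ID survives; a
            # Cinder-initiated migration would return the old ID instead.
            return {'save_volume_id': new_volume_id}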