From 70296431908208375677ae428a147f312b60e1ad Mon Sep 17 00:00:00 2001
From: Claudiu Belu
Date: Thu, 29 Jun 2017 17:00:31 +0300
Subject: [PATCH] hyperv: Cleans up live migration Planned VM

If an instance having iSCSI volumes attached is being live-migrated,
a Planned VM is created at the destination. If the live-migration
fails, the Planned VM is not cleaned up at the destination.

This patch ensures that rollback_live_migration_at_destination is
called on live-migration failure for Hyper-V compute nodes, which
cleans up the Planned VM.

Depends-On: I91636a82b057f566eab9887c422911163668f556

Change-Id: I4d61325793ed559dede408813ebda5aed2b0f110
Closes-Bug: #1604078
---
 nova/compute/manager.py                       | 27 +++++++++----------
 nova/tests/unit/compute/test_compute_mgr.py   |  7 +++++
 nova/tests/unit/virt/hyperv/test_vmops.py     | 22 ++++++++++++---
 nova/tests/unit/virt/hyperv/test_volumeops.py |  8 ++++++
 nova/virt/hyperv/vmops.py                     |  6 +++++
 nova/virt/hyperv/volumeops.py                 |  6 +++++
 6 files changed, 58 insertions(+), 18 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 0e31263a67..14fadfff9d 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -6513,23 +6513,22 @@ class ComputeManager(manager.Manager):
         # NOTE(pkoniszewski): block migration specific params are set inside
         # migrate_data objects for drivers that expose block live migration
         # information (i.e. Libvirt, Xenapi and HyperV). For other drivers
         # cleanup is not needed.
-        is_shared_block_storage = True
-        is_shared_instance_path = True
+        do_cleanup = False
+        destroy_disks = False
         if isinstance(migrate_data, migrate_data_obj.LibvirtLiveMigrateData):
-            is_shared_block_storage = migrate_data.is_shared_block_storage
-            is_shared_instance_path = migrate_data.is_shared_instance_path
+            # No instance booting at source host, but instance dir
+            # must be deleted for preparing next block migration
+            # must be deleted for preparing next live migration w/o shared
+            # storage
+            do_cleanup = not migrate_data.is_shared_instance_path
+            destroy_disks = not migrate_data.is_shared_block_storage
         elif isinstance(migrate_data, migrate_data_obj.XenapiLiveMigrateData):
-            is_shared_block_storage = not migrate_data.block_migration
-            is_shared_instance_path = not migrate_data.block_migration
+            do_cleanup = migrate_data.block_migration
+            destroy_disks = migrate_data.block_migration
         elif isinstance(migrate_data, migrate_data_obj.HyperVLiveMigrateData):
-            is_shared_instance_path = migrate_data.is_shared_instance_path
-            is_shared_block_storage = migrate_data.is_shared_instance_path
-
-        # No instance booting at source host, but instance dir
-        # must be deleted for preparing next block migration
-        # must be deleted for preparing next live migration w/o shared storage
-        do_cleanup = not is_shared_instance_path
-        destroy_disks = not is_shared_block_storage
+            # NOTE(claudiub): We need to cleanup any zombie Planned VM.
+            do_cleanup = True
+            destroy_disks = not migrate_data.is_shared_instance_path
 
         return (do_cleanup, destroy_disks)
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 72157a27dc..923b99a8ee 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -8334,6 +8334,13 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
     def test_live_migration_cleanup_flags_shared_hyperv(self):
         migrate_data = objects.HyperVLiveMigrateData(
             is_shared_instance_path=True)
+        do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags(
+            migrate_data)
+        self.assertTrue(do_cleanup)
+        self.assertFalse(destroy_disks)
+
+    def test_live_migration_cleanup_flags_other(self):
+        migrate_data = mock.Mock()
         do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags(
             migrate_data)
         self.assertFalse(do_cleanup)
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
index a5fa8e90fe..39141774a3 100644
--- a/nova/tests/unit/virt/hyperv/test_vmops.py
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -70,6 +70,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
         self._vmops._vhdutils = mock.MagicMock()
         self._vmops._pathutils = mock.MagicMock()
         self._vmops._hostutils = mock.MagicMock()
+        self._vmops._migrutils = mock.MagicMock()
         self._vmops._serial_console_ops = mock.MagicMock()
         self._vmops._block_dev_man = mock.MagicMock()
         self._vmops._vif_driver = mock.MagicMock()
@@ -1092,29 +1093,42 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
         self._vmops._pathutils.get_instance_dir.assert_called_once_with(
             mock_instance.name, create_dir=False, remove_dir=True)
 
-    @ddt.data(True, False)
+    @ddt.data({},
+              {'vm_exists': True},
+              {'planned_vm_exists': True})
+    @ddt.unpack
     @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.disconnect_volumes')
     @mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files')
     @mock.patch('nova.virt.hyperv.vmops.VMOps.power_off')
     @mock.patch('nova.virt.hyperv.vmops.VMOps.unplug_vifs')
-    def test_destroy(self, vm_exists, mock_unplug_vifs, mock_power_off,
-                     mock_delete_disk_files, mock_disconnect_volumes):
+    def test_destroy(self, mock_unplug_vifs, mock_power_off,
+                     mock_delete_disk_files, mock_disconnect_volumes,
+                     vm_exists=False, planned_vm_exists=False):
         mock_instance = fake_instance.fake_instance_obj(self.context)
         self._vmops._vmutils.vm_exists.return_value = vm_exists
+        self._vmops._migrutils.planned_vm_exists.return_value = (
+            planned_vm_exists)
 
         self._vmops.destroy(instance=mock_instance,
                             block_device_info=mock.sentinel.FAKE_BD_INFO,
                             network_info=mock.sentinel.fake_network_info)
 
+        mock_destroy_planned_vms = (
+            self._vmops._migrutils.destroy_existing_planned_vm)
         if vm_exists:
             self._vmops._vmutils.stop_vm_jobs.assert_called_once_with(
                 mock_instance.name)
             mock_power_off.assert_called_once_with(mock_instance)
             self._vmops._vmutils.destroy_vm.assert_called_once_with(
                 mock_instance.name)
+        elif planned_vm_exists:
+            self._vmops._migrutils.planned_vm_exists.assert_called_once_with(
+                mock_instance.name)
+            mock_destroy_planned_vms.assert_called_once_with(
+                mock_instance.name)
         else:
-            self.assertFalse(mock_power_off.called)
             self.assertFalse(self._vmops._vmutils.destroy_vm.called)
+            self.assertFalse(mock_destroy_planned_vms.called)
 
         self._vmops._vmutils.vm_exists.assert_called_with(
             mock_instance.name)
diff --git a/nova/tests/unit/virt/hyperv/test_volumeops.py b/nova/tests/unit/virt/hyperv/test_volumeops.py
index 22d1b1aa50..630ee2d600 100644
--- a/nova/tests/unit/virt/hyperv/test_volumeops.py
+++ b/nova/tests/unit/virt/hyperv/test_volumeops.py
@@ -304,8 +304,10 @@ class BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase):
         self._base_vol_driver._diskutils = mock.Mock()
         self._base_vol_driver._vmutils = mock.Mock()
+        self._base_vol_driver._migrutils = mock.Mock()
         self._base_vol_driver._conn = mock.Mock()
 
         self._vmutils = self._base_vol_driver._vmutils
+        self._migrutils = self._base_vol_driver._migrutils
         self._diskutils = self._base_vol_driver._diskutils
         self._conn = self._base_vol_driver._conn
 
@@ -452,9 +454,15 @@ class BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase):
     def test_attach_volume_block_dev(self):
         self._test_attach_volume(is_block_dev=True)
 
+    def test_detach_volume_planned_vm(self):
+        self._base_vol_driver.detach_volume(mock.sentinel.connection_info,
+                                            mock.sentinel.inst_name)
+        self._vmutils.detach_vm_disk.assert_not_called()
+
     @mock.patch.object(volumeops.BaseVolumeDriver,
                        'get_disk_resource_path')
     def test_detach_volume(self, mock_get_disk_resource_path):
+        self._migrutils.planned_vm_exists.return_value = False
         connection_info = get_fake_connection_info()
 
         self._base_vol_driver.detach_volume(connection_info,
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index b37b525d30..74c30c6966 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -94,6 +94,7 @@ class VMOps(object):
         self._metricsutils = utilsfactory.get_metricsutils()
         self._vhdutils = utilsfactory.get_vhdutils()
         self._hostutils = utilsfactory.get_hostutils()
+        self._migrutils = utilsfactory.get_migrationutils()
         self._pathutils = pathutils.PathUtils()
         self._volumeops = volumeops.VolumeOps()
         self._imagecache = imagecache.ImageCache()
@@ -738,9 +739,14 @@ class VMOps(object):
             self._vmutils.stop_vm_jobs(instance_name)
             self.power_off(instance)
             self._vmutils.destroy_vm(instance_name)
+        elif self._migrutils.planned_vm_exists(instance_name):
+            self._migrutils.destroy_existing_planned_vm(instance_name)
         else:
             LOG.debug("Instance not found", instance=instance)
 
+        # NOTE(claudiub): The vifs should be unplugged and the volumes
+        # should be disconnected even if the VM doesn't exist anymore,
+        # so they are not leaked.
         self.unplug_vifs(instance, network_info)
         self._volumeops.disconnect_volumes(block_device_info)
 
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index 26095633bd..2c53e9b44c 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -210,6 +210,7 @@ class BaseVolumeDriver(object):
         self._conn = None
         self._diskutils = utilsfactory.get_diskutils()
         self._vmutils = utilsfactory.get_vmutils()
+        self._migrutils = utilsfactory.get_migrationutils()
 
     @property
     def _connector(self):
@@ -277,6 +278,11 @@ class BaseVolumeDriver(object):
                                             slot)
 
     def detach_volume(self, connection_info, instance_name):
+        if self._migrutils.planned_vm_exists(instance_name):
+            LOG.warning("Instance %s is a Planned VM, cannot detach "
+                        "volumes from it.", instance_name)
+            return
+
         disk_path = self.get_disk_resource_path(connection_info)
 
         LOG.debug("Detaching disk %(disk_path)s "
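For readers skimming the diff, the core behaviour change is the flag logic in
ComputeManager._live_migration_cleanup_flags(). Below is a minimal,
self-contained sketch of that logic; the three migrate-data classes are
hypothetical stand-ins for the real objects in nova.objects.migrate_data,
modelling only the attributes the flags depend on.

    class LibvirtLiveMigrateData(object):
        # Hypothetical stand-in for the real Libvirt migrate_data object.
        def __init__(self, is_shared_instance_path, is_shared_block_storage):
            self.is_shared_instance_path = is_shared_instance_path
            self.is_shared_block_storage = is_shared_block_storage

    class XenapiLiveMigrateData(object):
        # Hypothetical stand-in; only block_migration matters here.
        def __init__(self, block_migration):
            self.block_migration = block_migration

    class HyperVLiveMigrateData(object):
        # Hypothetical stand-in; only is_shared_instance_path matters here.
        def __init__(self, is_shared_instance_path):
            self.is_shared_instance_path = is_shared_instance_path

    def live_migration_cleanup_flags(migrate_data):
        # Returns (do_cleanup, destroy_disks), mirroring the patched logic.
        do_cleanup = False
        destroy_disks = False
        if isinstance(migrate_data, LibvirtLiveMigrateData):
            # The instance dir must be removed when not on shared storage.
            do_cleanup = not migrate_data.is_shared_instance_path
            destroy_disks = not migrate_data.is_shared_block_storage
        elif isinstance(migrate_data, XenapiLiveMigrateData):
            do_cleanup = migrate_data.block_migration
            destroy_disks = migrate_data.block_migration
        elif isinstance(migrate_data, HyperVLiveMigrateData):
            # Always roll back at the destination so a zombie Planned VM
            # cannot survive a failed live migration; shared disks stay.
            do_cleanup = True
            destroy_disks = not migrate_data.is_shared_instance_path
        return do_cleanup, destroy_disks

    # Before the patch, a shared instance path meant no destination cleanup
    # at all for Hyper-V; now cleanup always runs while disks are preserved.
    assert live_migration_cleanup_flags(
        HyperVLiveMigrateData(is_shared_instance_path=True)) == (True, False)
    # Unknown migrate_data types still mean no cleanup, matching the new
    # test_live_migration_cleanup_flags_other test.
    assert live_migration_cleanup_flags(object()) == (False, False)

Note the asymmetry the patch deliberately introduces for Hyper-V: do_cleanup
no longer depends on shared storage at all, while destroy_disks still does.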
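The destination-side cleanup itself lives in VMOps.destroy() and in the
detach guard added to BaseVolumeDriver.detach_volume(). The sketch below is a
hedged illustration of the destroy ordering, not the real implementation:
FakeMigrationUtils and FakeVMUtils are hypothetical stubs standing in for the
objects returned by utilsfactory.get_migrationutils() and
utilsfactory.get_vmutils(), modelling only the calls the patch uses.

    class FakeMigrationUtils(object):
        # Hypothetical stub for utilsfactory.get_migrationutils().
        def __init__(self, planned_vms=()):
            self._planned_vms = set(planned_vms)

        def planned_vm_exists(self, instance_name):
            return instance_name in self._planned_vms

        def destroy_existing_planned_vm(self, instance_name):
            self._planned_vms.discard(instance_name)

    class FakeVMUtils(object):
        # Hypothetical stub for utilsfactory.get_vmutils().
        def __init__(self, vms=()):
            self._vms = set(vms)

        def vm_exists(self, instance_name):
            return instance_name in self._vms

        def destroy_vm(self, instance_name):
            self._vms.discard(instance_name)

    def destroy(vmutils, migrutils, instance_name):
        # Mirrors the ordering in the patched VMOps.destroy(): a realized VM
        # takes precedence, a zombie Planned VM is destroyed next, and the
        # vif/volume cleanup (elided here) runs in every case.
        if vmutils.vm_exists(instance_name):
            vmutils.destroy_vm(instance_name)
        elif migrutils.planned_vm_exists(instance_name):
            migrutils.destroy_existing_planned_vm(instance_name)

    # A failed live migration leaves only a Planned VM behind; destroy()
    # now removes it instead of falling through to "Instance not found".
    migrutils = FakeMigrationUtils(planned_vms={"instance-1"})
    destroy(FakeVMUtils(), migrutils, "instance-1")
    assert not migrutils.planned_vm_exists("instance-1")

The detach_volume() guard follows the same reasoning in reverse: a Planned VM
has no disk resources that can be detached, so the driver logs a warning and
returns early rather than asking vmutils to detach from a VM shell.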