From 0b0f40d1b308b29da537859b72080488560c23d4 Mon Sep 17 00:00:00 2001 From: Artom Lifshitz Date: Fri, 26 Nov 2021 14:36:09 -0500 Subject: [PATCH] Revert "Revert resize: wait for events according to hybrid plug" This reverts commit 7a7a223602ca5aa0aca8f65a6ab143f1d8f8ec1b. That commit was added because - tl;dr - upon revert resize, Neutron with the OVS backend and the iptables security group driver would send us the network-vif-plugged event as soon as we updated the port binding. That behaviour has changed with commit 66c7f00e1d9. With that commit, we started unplugging the vifs on the source compute host when doing a resize. When reverting the resize, the vifs had to be re-plugged again, regardless of the networking backend in use. This renders commit 7a7a223602ca5aa0aca8f65a6ab143f1d8f8ec1b pointless, and it can be reverted. Conflicts - most have to do with context around this commit's code: nova/compute/manager.py a2984b647a4 added provider_mappings to _finish_revert_resize_network_migrate_finish()'s signature 750aef54b19 started using _finish_revert_resize_network_migrate_finish() in _finish_revert_snapshot_based_resize_at_source() nova/network/model.py 8b33ac06445 added get_live_migration_plug_time_events() and has_live_migration_plug_time_event() 7da94440db1 added has_port_with_allocation() nova/objects/migration.py f203da38387 added is_resize() and is_live_migration() nova/tests/unit/compute/test_compute.py a0e60feb3ec added request_spec to the test nova/tests/unit/compute/test_compute_mgr.py be278006a58 added unit tests below ours nova/tests/unit/network/test_network_info.py 7da94440db1 (again) added tests for has_port_with_allocation() nova/tests/unit/virt/libvirt/test_driver.py and nova/virt/libvirt/driver.py are different in that attempting to identify individual conflicts is a pointless exercise, as so much has changed (mdev, vtpm, the recent wait for events during hard reboot workaround config option, etc). 
They can be treated as manual removal of any code that had to do with the bind-time events logic (though guided by the conflict markers in git). TODO(artom) There was a follow up commit, 78a08d44ea68b31e27ce344f452756886ad309bd, that added the migration parameter to finish_revert_migration(). This is no longer needed, as the migration was only used to obtain plug-time events. We'll have to undo that as well. Closes-bug: 1952003 Change-Id: I3cb39a9ec2c260f422b3c48122b9db512cdd799b --- .zuul.yaml | 3 +- nova/compute/manager.py | 81 +++++--------- nova/network/model.py | 25 ----- nova/objects/migration.py | 3 - nova/tests/unit/compute/test_compute.py | 8 +- nova/tests/unit/compute/test_compute_mgr.py | 106 ++----------------- nova/tests/unit/network/test_network_info.py | 29 ----- nova/tests/unit/objects/test_migration.py | 8 -- nova/tests/unit/virt/libvirt/test_driver.py | 64 ++--------- nova/virt/libvirt/driver.py | 11 +- 10 files changed, 52 insertions(+), 286 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 74658ed1a9..1c3d13d5dd 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -605,8 +605,7 @@ - nova-lvm - nova-multi-cell - nova-next - - nova-ovs-hybrid-plug: - voting: false + - nova-ovs-hybrid-plug - nova-tox-validate-backport: voting: false - nova-tox-functional-centos8-py36 diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 14834a86b6..b066b6cc01 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -4809,8 +4809,18 @@ class ComputeManager(manager.Manager): self.host, instance=instance) # TODO(mriedem): Calculate provider mappings when we support # cross-cell resize/migrate with ports having resource requests. - self._finish_revert_resize_network_migrate_finish( - ctxt, instance, migration, provider_mappings=None) + # NOTE(hanrong): we need to change migration.dest_compute to + # source host temporarily. + # "network_api.migrate_instance_finish" will setup the network + # for the instance on the destination host. 
For revert resize, + # the instance will back to the source host, the setup of the + # network for instance should be on the source host. So set + # the migration.dest_compute to source host at here. + with utils.temporary_mutation( + migration, dest_compute=migration.source_compute + ): + self.network_api.migrate_instance_finish( + ctxt, instance, migration, provider_mappings=None) network_info = self.network_api.get_instance_nw_info(ctxt, instance) # Remember that prep_snapshot_based_resize_at_source destroyed the @@ -4902,50 +4912,6 @@ class ComputeManager(manager.Manager): self.compute_rpcapi.finish_revert_resize(context, instance, migration, migration.source_compute, request_spec) - def _finish_revert_resize_network_migrate_finish( - self, context, instance, migration, provider_mappings): - """Causes port binding to be updated. In some Neutron or port - configurations - see NetworkModel.get_bind_time_events() - we - expect the vif-plugged event from Neutron immediately and wait for it. - The rest of the time, the event is expected further along in the - virt driver, so we don't wait here. - - :param context: The request context. - :param instance: The instance undergoing the revert resize. - :param migration: The Migration object of the resize being reverted. - :param provider_mappings: a dict of list of resource provider uuids - keyed by port uuid - :raises: eventlet.timeout.Timeout or - exception.VirtualInterfacePlugException. - """ - network_info = instance.get_network_info() - events = [] - deadline = CONF.vif_plugging_timeout - if deadline and network_info: - events = network_info.get_bind_time_events(migration) - if events: - LOG.debug('Will wait for bind-time events: %s', events) - error_cb = self._neutron_failed_migration_callback - try: - with self.virtapi.wait_for_instance_event(instance, events, - deadline=deadline, - error_callback=error_cb): - # NOTE(hanrong): we need to change migration.dest_compute to - # source host temporarily. 
- # "network_api.migrate_instance_finish" will setup the network - # for the instance on the destination host. For revert resize, - # the instance will back to the source host, the setup of the - # network for instance should be on the source host. So set - # the migration.dest_compute to source host at here. - with utils.temporary_mutation( - migration, dest_compute=migration.source_compute): - self.network_api.migrate_instance_finish( - context, instance, migration, provider_mappings) - except eventlet.timeout.Timeout: - with excutils.save_and_reraise_exception(): - LOG.error('Timeout waiting for Neutron events: %s', events, - instance=instance) - @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @@ -5003,8 +4969,18 @@ class ComputeManager(manager.Manager): self.network_api.setup_networks_on_host(context, instance, migration.source_compute) - self._finish_revert_resize_network_migrate_finish( - context, instance, migration, provider_mappings) + # NOTE(hanrong): we need to change migration.dest_compute to + # source host temporarily. "network_api.migrate_instance_finish" + # will setup the network for the instance on the destination host. + # For revert resize, the instance will back to the source host, the + # setup of the network for instance should be on the source host. + # So set the migration.dest_compute to source host at here. + with utils.temporary_mutation( + migration, dest_compute=migration.source_compute): + self.network_api.migrate_instance_finish(context, + instance, + migration, + provider_mappings) network_info = self.network_api.get_instance_nw_info(context, instance) @@ -5081,8 +5057,7 @@ class ComputeManager(manager.Manager): # the provider mappings. If the instance has ports with # resource request then the port update will fail in # _update_port_binding_for_instance() called via - # _finish_revert_resize_network_migrate_finish() in - # finish_revert_resize. + # migrate_instance_finish() in finish_revert_resize. 
provider_mappings = None return provider_mappings @@ -8306,8 +8281,8 @@ class ComputeManager(manager.Manager): return migrate_data @staticmethod - def _neutron_failed_migration_callback(event_name, instance): - msg = ('Neutron reported failure during migration ' + def _neutron_failed_live_migration_callback(event_name, instance): + msg = ('Neutron reported failure during live migration ' 'with %(event)s for instance %(uuid)s') msg_args = {'event': event_name, 'uuid': instance.uuid} if CONF.vif_plugging_is_fatal: @@ -8403,7 +8378,7 @@ class ComputeManager(manager.Manager): disk = None deadline = CONF.vif_plugging_timeout - error_cb = self._neutron_failed_migration_callback + error_cb = self._neutron_failed_live_migration_callback # In order to avoid a race with the vif plugging that the virt # driver does on the destination host, we register our events # to wait for before calling pre_live_migration. Then if the diff --git a/nova/network/model.py b/nova/network/model.py index 5268baf112..64995c9527 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -485,17 +485,6 @@ class VIF(Model): 'ips': ips} return [] - def has_bind_time_event(self, migration): - """Returns whether this VIF's network-vif-plugged external event will - be sent by Neutron at "bind-time" - in other words, as soon as the port - binding is updated. This is in the context of updating the port binding - to a host that already has the instance in a shutoff state - in - practice, this means reverting either a cold migration or a - non-same-host resize. 
- """ - return (self.is_hybrid_plug_enabled() and not - migration.is_same_host()) - @property def has_live_migration_plug_time_event(self): """Returns whether this VIF's network-vif-plugged external event will @@ -564,13 +553,6 @@ class NetworkInfo(list): def json(self): return jsonutils.dumps(self) - def get_bind_time_events(self, migration): - """Returns a list of external events for any VIFs that have - "bind-time" events during cold migration. - """ - return [('network-vif-plugged', vif['id']) - for vif in self if vif.has_bind_time_event(migration)] - def get_live_migration_plug_time_events(self): """Returns a list of external events for any VIFs that have "plug-time" events during live migration. @@ -578,13 +560,6 @@ class NetworkInfo(list): return [('network-vif-plugged', vif['id']) for vif in self if vif.has_live_migration_plug_time_event] - def get_plug_time_events(self, migration): - """Returns a list of external events for any VIFs that have - "plug-time" events during cold migration. 
- """ - return [('network-vif-plugged', vif['id']) - for vif in self if not vif.has_bind_time_event(migration)] - def has_port_with_allocation(self): return any(vif.has_allocation() for vif in self) diff --git a/nova/objects/migration.py b/nova/objects/migration.py index b9afe1632f..cacb636ccc 100644 --- a/nova/objects/migration.py +++ b/nova/objects/migration.py @@ -202,9 +202,6 @@ class Migration(base.NovaPersistentObject, base.NovaObject, def instance(self, instance): self._cached_instance = instance - def is_same_host(self): - return self.source_compute == self.dest_compute - @property def is_live_migration(self): return self.migration_type == fields.MigrationType.LIVE_MIGRATION diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py index 3ea9039524..f65f1abdb7 100644 --- a/nova/tests/unit/compute/test_compute.py +++ b/nova/tests/unit/compute/test_compute.py @@ -5801,9 +5801,7 @@ class ComputeTestCase(BaseTestCase, old_vm_state = vm_states.ACTIVE else: old_vm_state = vm_states.STOPPED - params = {'vm_state': old_vm_state, - 'info_cache': objects.InstanceInfoCache( - network_info=network_model.NetworkInfo([]))} + params = {'vm_state': old_vm_state} instance = self._create_fake_instance_obj(params) request_spec = objects.RequestSpec() @@ -5956,9 +5954,7 @@ class ComputeTestCase(BaseTestCase, def fake(*args, **kwargs): pass - params = {'info_cache': objects.InstanceInfoCache( - network_info=network_model.NetworkInfo([]))} - instance = self._create_fake_instance_obj(params) + instance = self._create_fake_instance_obj() request_spec = objects.RequestSpec() self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake) diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py index 945d2206ae..4d7967b37e 100644 --- a/nova/tests/unit/compute/test_compute_mgr.py +++ b/nova/tests/unit/compute/test_compute_mgr.py @@ -6051,86 +6051,6 @@ class 
ComputeManagerUnitTestCase(test.NoDBTestCase, self.context, fake_instance, fake_bdm) block_stats.assert_called_once_with(fake_instance, 'vda') - def _test_finish_revert_resize_network_migrate_finish( - self, vifs, events, migration=None): - instance = fake_instance.fake_instance_obj(self.context) - instance.info_cache = objects.InstanceInfoCache( - network_info=network_model.NetworkInfo(vifs)) - if migration is None: - migration = objects.Migration( - source_compute='fake-source', - dest_compute='fake-dest') - - def fake_migrate_instance_finish( - context, instance, migration, mapping): - # NOTE(artom) This looks weird, but it's checking that the - # temporaty_mutation() context manager did its job. - self.assertEqual(migration.dest_compute, migration.source_compute) - - with test.nested( - mock.patch.object(self.compute.virtapi, - 'wait_for_instance_event'), - mock.patch.object(self.compute.network_api, - 'migrate_instance_finish', - side_effect=fake_migrate_instance_finish) - ) as (mock_wait, mock_migrate_instance_finish): - self.compute._finish_revert_resize_network_migrate_finish( - self.context, instance, migration, mock.sentinel.mapping) - mock_wait.assert_called_once_with( - instance, events, deadline=CONF.vif_plugging_timeout, - error_callback=self.compute._neutron_failed_migration_callback) - mock_migrate_instance_finish.assert_called_once_with( - self.context, instance, migration, mock.sentinel.mapping) - - def test_finish_revert_resize_network_migrate_finish_wait(self): - """Test that we wait for bind-time events if we have a hybrid-plugged - VIF. 
- """ - self._test_finish_revert_resize_network_migrate_finish( - [network_model.VIF(id=uuids.hybrid_vif, - details={'ovs_hybrid_plug': True}), - network_model.VIF(id=uuids.normal_vif, - details={'ovs_hybrid_plug': False})], - [('network-vif-plugged', uuids.hybrid_vif)]) - - def test_finish_revert_resize_network_migrate_finish_same_host(self): - """Test that we're not waiting for any events if its a same host - resize revert. - """ - migration = objects.Migration( - source_compute='fake-source', dest_compute='fake-source') - - self._test_finish_revert_resize_network_migrate_finish( - [network_model.VIF(id=uuids.hybrid_vif, - details={'ovs_hybrid_plug': True}), - network_model.VIF(id=uuids.normal_vif, - details={'ovs_hybrid_plug': False})], - [], migration=migration - ) - - def test_finish_revert_resize_network_migrate_finish_dont_wait(self): - """Test that we're not waiting for any events if we don't have any - hybrid-plugged VIFs. - """ - self._test_finish_revert_resize_network_migrate_finish( - [network_model.VIF(id=uuids.hybrid_vif, - details={'ovs_hybrid_plug': False}), - network_model.VIF(id=uuids.normal_vif, - details={'ovs_hybrid_plug': False})], - []) - - def test_finish_revert_resize_network_migrate_finish_no_vif_timeout(self): - """Test that we're not waiting for any events if vif_plugging_timeout - is 0. - """ - self.flags(vif_plugging_timeout=0) - self._test_finish_revert_resize_network_migrate_finish( - [network_model.VIF(id=uuids.hybrid_vif, - details={'ovs_hybrid_plug': True}), - network_model.VIF(id=uuids.normal_vif, - details={'ovs_hybrid_plug': True})], - []) - @mock.patch('nova.compute.manager.LOG') def test_cache_images_unsupported(self, mock_log): r = self.compute.cache_images(self.context, ['an-image']) @@ -8877,8 +8797,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, do_finish_revert_resize() @mock.patch.object(objects.Instance, 'drop_migration_context') - @mock.patch('nova.compute.manager.ComputeManager.' 
- '_finish_revert_resize_network_migrate_finish') + @mock.patch('nova.network.neutron.API.migrate_instance_finish') @mock.patch('nova.scheduler.utils.' 'fill_provider_mapping_based_on_allocation') @mock.patch('nova.compute.manager.ComputeManager._revert_allocation') @@ -8892,7 +8811,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, def test_finish_revert_resize_recalc_group_rp_mapping( self, mock_get_bdms, mock_notify_action, mock_notify_usage, mock_set_instance_info, mock_instance_save, mock_revert_allocation, - mock_fill_provider_mapping, mock_network_migrate_finish, + mock_fill_provider_mapping, mock_migrate_instance_finish, mock_drop_migration_context): mock_get_bdms.return_value = objects.BlockDeviceMappingList() @@ -8909,8 +8828,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, mock.sentinel.allocation) @mock.patch.object(objects.Instance, 'drop_migration_context') - @mock.patch('nova.compute.manager.ComputeManager.' - '_finish_revert_resize_network_migrate_finish') + @mock.patch('nova.network.neutron.API.migrate_instance_finish') @mock.patch('nova.scheduler.utils.' 'fill_provider_mapping_based_on_allocation') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
@@ -8927,7 +8845,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, self, mock_get_bdms, mock_notify_action, mock_notify_usage, mock_set_instance_info, mock_instance_save, mock_revert_allocation, mock_get_allocations, mock_fill_provider_mapping, - mock_network_migrate_finish, mock_drop_migration_context): + mock_migrate_instance_finish, mock_drop_migration_context): mock_get_bdms.return_value = objects.BlockDeviceMappingList() mock_get_allocations.return_value = mock.sentinel.allocation @@ -8941,7 +8859,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, mock_get_allocations.assert_not_called() mock_fill_provider_mapping.assert_not_called() - mock_network_migrate_finish.assert_called_once_with( + mock_migrate_instance_finish.assert_called_once_with( self.context, self.instance, self.migration, None) def test_confirm_resize_deletes_allocations_and_update_scheduler(self): @@ -12126,8 +12044,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') @mock.patch('nova.compute.manager.ComputeManager.' '_update_volume_attachments') - @mock.patch('nova.compute.manager.ComputeManager.' - '_finish_revert_resize_network_migrate_finish') + @mock.patch('nova.network.neutron.API.migrate_instance_finish') @mock.patch('nova.compute.manager.ComputeManager.' '_get_instance_block_device_info') @mock.patch('nova.objects.Instance.drop_migration_context') @@ -12137,7 +12054,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, '_complete_volume_attachments') def test_finish_revert_snapshot_based_resize_at_source( self, mock_complete_attachments, mock_update_after_spawn, - mock_drop_mig_context, mock_get_bdi, mock_net_migrate_finish, + mock_drop_mig_context, mock_get_bdi, mock_migrate_instance_finish, mock_update_attachments, mock_get_bdms, mock_revert_allocs, mock_inst_save): """Happy path test for finish_revert_snapshot_based_resize_at_source. 
@@ -12177,7 +12094,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, mock_update_attachments.assert_called_once_with( self.context, self.instance, mock_get_bdms.return_value) # Assert that port bindings were updated to point at the source host. - mock_net_migrate_finish.assert_called_once_with( + mock_migrate_instance_finish.assert_called_once_with( self.context, self.instance, self.migration, provider_mappings=None) # Assert the driver finished reverting the migration. @@ -12202,8 +12119,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') @mock.patch('nova.compute.manager.ComputeManager.' '_update_volume_attachments') - @mock.patch('nova.compute.manager.ComputeManager.' - '_finish_revert_resize_network_migrate_finish') + @mock.patch('nova.network.neutron.API.migrate_instance_finish') @mock.patch('nova.compute.manager.ComputeManager.' '_get_instance_block_device_info') @mock.patch('nova.objects.Instance.drop_migration_context') @@ -12214,7 +12130,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, side_effect=test.TestingException('vol complete failed')) def test_finish_revert_snapshot_based_resize_at_source_driver_fails( self, mock_complete_attachments, mock_update_after_spawn, - mock_drop_mig_context, mock_get_bdi, mock_net_migrate_finish, + mock_drop_mig_context, mock_get_bdi, mock_migrate_instance_finish, mock_update_attachments, mock_get_bdms, mock_revert_allocs, mock_inst_save): """Test for _finish_revert_snapshot_based_resize_at_source where the @@ -12268,7 +12184,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase, mock_update_attachments.assert_called_once_with( self.context, self.instance, mock_get_bdms.return_value) # Assert that port bindings were updated to point at the source host. 
- mock_net_migrate_finish.assert_called_once_with( + mock_migrate_instance_finish.assert_called_once_with( self.context, self.instance, self.migration, provider_mappings=None) # Assert final DB cleanup for the instance did not happen. diff --git a/nova/tests/unit/network/test_network_info.py b/nova/tests/unit/network/test_network_info.py index b9013d6f54..0420e2d791 100644 --- a/nova/tests/unit/network/test_network_info.py +++ b/nova/tests/unit/network/test_network_info.py @@ -19,7 +19,6 @@ from oslo_utils.fixture import uuidsentinel as uuids from nova import exception from nova.network import model -from nova import objects from nova import test from nova.tests.unit import fake_network_cache_model from nova.virt import netutils @@ -855,34 +854,6 @@ iface eth1 inet static libvirt_virt_type='lxc') self.assertEqual(expected, template) - def test_get_events(self): - network_info = model.NetworkInfo([ - model.VIF( - id=uuids.hybrid_vif, - details={'ovs_hybrid_plug': True}), - model.VIF( - id=uuids.normal_vif, - details={'ovs_hybrid_plug': False})]) - same_host = objects.Migration(source_compute='fake-host', - dest_compute='fake-host') - diff_host = objects.Migration(source_compute='fake-host1', - dest_compute='fake-host2') - # Same-host migrations will have all events be plug-time. - self.assertCountEqual( - [('network-vif-plugged', uuids.normal_vif), - ('network-vif-plugged', uuids.hybrid_vif)], - network_info.get_plug_time_events(same_host)) - # Same host migration will have no plug-time events. 
- self.assertEqual([], network_info.get_bind_time_events(same_host)) - # Diff-host migration + OVS hybrid plug = bind-time events - self.assertEqual( - [('network-vif-plugged', uuids.hybrid_vif)], - network_info.get_bind_time_events(diff_host)) - # Diff-host migration + normal OVS = plug-time events - self.assertEqual( - [('network-vif-plugged', uuids.normal_vif)], - network_info.get_plug_time_events(diff_host)) - def test_has_port_with_allocation(self): network_info = model.NetworkInfo([]) self.assertFalse(network_info.has_port_with_allocation()) diff --git a/nova/tests/unit/objects/test_migration.py b/nova/tests/unit/objects/test_migration.py index 6d365c01d6..970122a409 100644 --- a/nova/tests/unit/objects/test_migration.py +++ b/nova/tests/unit/objects/test_migration.py @@ -359,14 +359,6 @@ class _TestMigrationObject(object): mig = objects.Migration.get_by_uuid(self.context, uuidsentinel.mig) self.assertEqual(uuidsentinel.mig, mig.uuid) - def test_is_same_host(self): - same_host = objects.Migration(source_compute='fake-host', - dest_compute='fake-host') - diff_host = objects.Migration(source_compute='fake-host1', - dest_compute='fake-host2') - self.assertTrue(same_host.is_same_host()) - self.assertFalse(diff_host.is_same_host()) - class TestMigrationObject(test_objects._LocalTest, _TestMigrationObject): diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py index 80f42da3d9..50cb5536ef 100644 --- a/nova/tests/unit/virt/libvirt/test_driver.py +++ b/nova/tests/unit/virt/libvirt/test_driver.py @@ -19169,10 +19169,9 @@ class LibvirtConnTestCase(test.NoDBTestCase, self.assertEqual(0, domain.resume.call_count) def _test_create_guest_with_network__events( - self, neutron_failure=None, power_on=True, events=None, + self, neutron_failure=None, power_on=True, ): generated_events = [] - events_passed_to_prepare = [] def wait_timeout(): event = mock.MagicMock() @@ -19190,7 +19189,6 @@ class 
LibvirtConnTestCase(test.NoDBTestCase, m.event_name = '%s-%s' % (name, tag) m.wait.side_effect = wait_timeout generated_events.append(m) - events_passed_to_prepare.append((name, tag)) return m virtapi = manager.ComputeVirtAPI(mock.MagicMock()) @@ -19209,8 +19207,7 @@ class LibvirtConnTestCase(test.NoDBTestCase, def test_create(cleanup, create, plug_vifs): domain = drvr._create_guest_with_network(self.context, 'xml', instance, vifs, None, - power_on=power_on, - external_events=events) + power_on=power_on) plug_vifs.assert_called_with(instance, vifs) pause = self._get_pause_flag(drvr, vifs, power_on=power_on) @@ -19225,9 +19222,7 @@ class LibvirtConnTestCase(test.NoDBTestCase, test_create() - if events and CONF.vif_plugging_timeout: - self.assertEqual(events_passed_to_prepare, events) - elif CONF.vif_plugging_timeout and power_on: + if CONF.vif_plugging_timeout and power_on: prepare.assert_has_calls([ mock.call(instance, 'network-vif-plugged', uuids.vif_1), mock.call(instance, 'network-vif-plugged', uuids.vif_2)]) @@ -19243,15 +19238,6 @@ class LibvirtConnTestCase(test.NoDBTestCase, def test_create_guest_with_network__events_neutron(self): self._test_create_guest_with_network__events() - def test_create_guest_with_network__events_passed_in(self): - self._test_create_guest_with_network__events( - events=[('network-vif-plugged', uuids.fake_vif)]) - - def test_create_guest_with_network__events_passed_in_0_timeout(self): - self.flags(vif_plugging_timeout=0) - self._test_create_guest_with_network__events( - events=[('network-vif-plugged', uuids.fake_vif)]) - def test_create_guest_with_network_events_neutron_power_off(self): # Tests that we don't wait for events if we don't start the instance. 
self._test_create_guest_with_network__events(power_on=False) @@ -22241,7 +22227,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin): mock_restore_vtpm.assert_not_called() mock_delete_vtpm.assert_not_called() - def _test_finish_revert_migration(self, power_on, migration): + def _test_finish_revert_migration(self, power_on): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .finish_revert_migration. """ @@ -22258,14 +22244,11 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin): def fake_create_guest_with_network( _self, context, xml, instance, network_info, block_device_info, power_on=None, vifs_already_plugged=None, post_xml_callback=None, - external_events=None, cleanup_instance_dir=False, - cleanup_instance_disks=False, + cleanup_instance_dir=False, cleanup_instance_disks=False, ): self.fake_create_guest_called = True self.assertEqual(powered_on, power_on) self.assertFalse(vifs_already_plugged) - self.assertEqual(self.events_passed_to_fake_create, - external_events) return mock.MagicMock() def fake_get_info(self, instance): @@ -22304,51 +22287,22 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin): f = open(libvirt_xml_path, 'w') f.close() - network_info = network_model.NetworkInfo( - [network_model.VIF(id=uuids.normal_vif), - network_model.VIF(id=uuids.hybrid_vif, - details={'ovs_hybrid_plug': True})]) - if migration.is_same_host(): - # Same host is all plug-time - self.events_passed_to_fake_create = [ - ('network-vif-plugged', uuids.normal_vif), - ('network-vif-plugged', uuids.hybrid_vif)] - else: - # For different host migration only non-hybrid plug - # ("normal") VIFs "emit" plug-time events. 
- self.events_passed_to_fake_create = [ - ('network-vif-plugged', uuids.normal_vif)] - with mock.patch.object( self.drvr, '_get_all_assigned_mediated_devices', return_value={} ) as mock_get_a_mdevs: self.drvr.finish_revert_migration( - self.context, instance, network_info, migration, - power_on=power_on) + self.context, instance, network_model.NetworkInfo(), + objects.Migration(), power_on=power_on) self.assertTrue(self.fake_create_guest_called) mock_get_a_mdevs.assert_called_once_with(mock.ANY) def test_finish_revert_migration_power_on(self): - migration = objects.Migration(id=42, source_compute='fake-host1', - dest_compute='fake-host2') - self._test_finish_revert_migration(power_on=True, migration=migration) + self._test_finish_revert_migration(True) def test_finish_revert_migration_power_off(self): - migration = objects.Migration(id=42, source_compute='fake-host1', - dest_compute='fake-host2') - self._test_finish_revert_migration(power_on=False, migration=migration) - - def test_finish_revert_migration_same_host(self): - migration = objects.Migration(id=42, source_compute='fake-host', - dest_compute='fake-host') - self._test_finish_revert_migration(power_on=True, migration=migration) - - def test_finish_revert_migration_diff_host(self): - migration = objects.Migration(id=42, source_compute='fake-host1', - dest_compute='fake-host2') - self._test_finish_revert_migration(power_on=True, migration=migration) + self._test_finish_revert_migration(False) def _test_finish_revert_migration_after_crash(self, backup_made=True, del_inst_failed=False): diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index e88d9068c6..2ea493d452 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -11278,18 +11278,9 @@ class LibvirtDriver(driver.ComputeDriver): instance.image_meta, block_device_info=block_device_info, mdevs=mdevs) - # NOTE(artom) In some Neutron or port configurations we've already - # waited for vif-plugged events in the compute 
manager's - # _finish_revert_resize_network_migrate_finish(), right after updating - # the port binding. For any ports not covered by those "bind-time" - # events, we wait for "plug-time" events here. - events = network_info.get_plug_time_events(migration) - if events: - LOG.debug('Instance is using plug-time events: %s', events, - instance=instance) self._create_guest_with_network( context, xml, instance, network_info, block_device_info, - power_on=power_on, external_events=events) + power_on=power_on) if power_on: timer = loopingcall.FixedIntervalLoopingCall(