From 8ab51e4422f1fffd272f153e4f2ed4aba469b178 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Ribaud?= Date: Mon, 3 Oct 2022 17:56:06 +0200 Subject: [PATCH] Support resuming an instance with shares (compute manager part) Allow to resume an instance with shares attached. Manila is the OpenStack Shared Filesystems service. This series of patches implements changes required in Nova to allow the shares provided by Manila to be associated with and attached to instances using virtiofs. Implements: blueprint libvirt-virtiofs-attach-manila-shares Change-Id: I41639e51b624be0d09f8dad25e66cb8bd0185311 --- nova/compute/manager.py | 9 +++- nova/tests/unit/compute/test_compute.py | 58 ++++++++++++++++++++++++- 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ce4eb78edd..4e6616bdef 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -7203,6 +7203,13 @@ class ComputeManager(manager.Manager): block_device_info = self._get_instance_block_device_info( context, instance, bdms=bdms) + # This allows passing share_info to the resume operation for + # future usage. However, this scenario is currently not possible + # because suspending an instance with a share is not permitted + # by libvirt. As a result, the suspend action involving a share + # is blocked by the API. 
+ share_info = self._get_share_info(context, instance) + compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.RESUME, phase=fields.NotificationPhase.START, bdms=bdms) @@ -7212,7 +7219,7 @@ class ComputeManager(manager.Manager): with self._error_out_instance_on_exception(context, instance, instance_state=instance.vm_state): self.driver.resume(context, instance, network_info, - block_device_info) + block_device_info, share_info) instance.power_state = self._get_power_state(instance) diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py index b28a1d6665..995380e52d 100644 --- a/nova/tests/unit/compute/test_compute.py +++ b/nova/tests/unit/compute/test_compute.py @@ -2877,12 +2877,17 @@ class ComputeTestCase(BaseTestCase, action='unpause', phase='end')]) self.compute.terminate_instance(self.context, instance, []) + @mock.patch('nova.virt.fake.FakeDriver.resume') + @mock.patch('nova.compute.manager.ComputeManager._get_share_info') @mock.patch('nova.compute.utils.notify_about_instance_action') @mock.patch('nova.context.RequestContext.elevated') - def test_suspend(self, mock_context, mock_notify): + def test_suspend(self, mock_context, mock_notify, mock_get_share_info, + mock_resume): # ensure instance can be suspended and resumed. 
context = self.context mock_context.return_value = context + share_info = objects.ShareMappingList() + mock_get_share_info.return_value = share_info instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(context, instance, {}, {}, {}, [], block_device_mapping=[]) @@ -2906,6 +2911,57 @@ class ComputeTestCase(BaseTestCase, action='suspend', phase='start'), mock.call(context, instance, 'fake-mini', action='suspend', phase='end')]) + + mock_get_share_info.assert_called_once_with(context, instance) + + mock_resume.assert_called_once_with( + self.context, instance, mock.ANY, mock.ANY, share_info) + + self.compute.terminate_instance(self.context, instance, []) + + @mock.patch('nova.compute.manager.ComputeManager.deny_share') + @mock.patch('nova.virt.fake.FakeDriver.resume') + @mock.patch('nova.compute.manager.ComputeManager._get_share_info') + @mock.patch('nova.compute.utils.notify_about_instance_action') + @mock.patch('nova.context.RequestContext.elevated') + def test_suspend_with_share(self, mock_context, mock_notify, + mock_get_share_info, mock_resume, mock_deny_share): + # ensure instance can be suspended and resumed. 
+ context = self.context + mock_context.return_value = context + share_info = self.fake_share_info() + mock_get_share_info.return_value = share_info + instance = self._create_fake_instance_obj() + self.compute.build_and_run_instance(context, instance, {}, {}, {}, + [], block_device_mapping=[]) + instance.task_state = task_states.SUSPENDING + instance.save() + self.compute.suspend_instance(context, instance) + instance.task_state = task_states.RESUMING + instance.save() + self.compute.resume_instance(context, instance) + + self.assertEqual(len(self.notifier.notifications), 6) + + msg = self.notifier.notifications[2] + self.assertEqual(msg.event_type, + 'compute.instance.suspend.start') + msg = self.notifier.notifications[3] + self.assertEqual(msg.event_type, + 'compute.instance.suspend.end') + mock_notify.assert_has_calls([ + mock.call(context, instance, 'fake-mini', + action='suspend', phase='start'), + mock.call(context, instance, 'fake-mini', + action='suspend', phase='end')]) + + mock_get_share_info.assert_called_once_with(context, instance) + + mock_resume.assert_called_once_with( + self.context, instance, mock.ANY, mock.ANY, share_info) + + # Because we have shares, terminating the instance requires + # denying the share, so mocking is required. self.compute.terminate_instance(self.context, instance, []) def test_suspend_error(self):