Rename the ``deadline`` parameter to the more accurate name ``timeout``

Change-Id: If57fb3ada65b658bd4b5cca62ec22485f431d2a4
Signed-off-by: Kamil Sambor <kamil.sambor@gmail.com>
This commit is contained in:
Kamil Sambor
2025-08-14 12:21:14 +02:00
parent 00f554fd92
commit 29e1dc8b43
6 changed files with 21 additions and 21 deletions
+9 -9
View File
@@ -504,7 +504,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
break
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
def wait_for_instance_event(self, instance, event_names, timeout=300,
error_callback=None):
"""Plan to wait for some events, run some code, then wait.
@@ -542,7 +542,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
:param event_names: A list of event names. Each element is a
tuple of strings to indicate (name, tag),
where name is required, but tag may be None.
:param deadline: Maximum number of seconds we should wait for all
:param timeout: Maximum number of seconds we should wait for all
of the specified events to arrive.
:param error_callback: A function to be called if an event arrives
@@ -564,7 +564,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
# NOTE(danms): Don't wait for any of the events. They
# should all be canceled and fired immediately below,
# but don't stick around if not.
deadline = 0
timeout = 0
try:
yield
except self._exit_early_exc as e:
@@ -581,7 +581,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
sw.start()
try:
self._wait_for_instance_events(
instance, events, error_callback, timeout=deadline)
instance, events, error_callback, timeout=timeout)
except exception.InstanceEventTimeout:
LOG.warning(
'Timeout waiting for %(events)s for instance with '
@@ -3023,7 +3023,7 @@ class ComputeManager(manager.Manager):
timeout = CONF.arq_binding_timeout
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout):
instance, events, timeout=timeout):
resolved_arqs = cyclient.get_arqs_for_instance(
instance.uuid, only_resolved=True)
# Events for these resolved ARQs may have already arrived.
@@ -3757,14 +3757,14 @@ class ComputeManager(manager.Manager):
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
image_size = int(math.ceil(float(image.get('size')) / units.Gi))
deadline = CONF.reimage_timeout_per_gb * image_size
timeout = CONF.reimage_timeout_per_gb * image_size
error_cb = self._reimage_failed_callback
# Call cinder to perform reimage operation and wait until an
# external event is triggered.
try:
with self.virtapi.wait_for_instance_event(instance, events,
deadline=deadline,
timeout=timeout,
error_callback=error_cb):
self.volume_api.reimage_volume(
context, root_bdm.volume_id, image_id,
@@ -9591,7 +9591,7 @@ class ComputeManager(manager.Manager):
else:
disk = None
deadline = CONF.vif_plugging_timeout
timeout = CONF.vif_plugging_timeout
error_cb = self._neutron_failed_live_migration_callback
# In order to avoid a race with the vif plugging that the virt
# driver does on the destination host, we register our events
@@ -9599,7 +9599,7 @@ class ComputeManager(manager.Manager):
# dest host reports back that we shouldn't wait, we can break
# out of the context manager using _BreakWaitForInstanceEvent.
with self.virtapi.wait_for_instance_event(
instance, events, deadline=deadline,
instance, events, timeout=timeout,
error_callback=error_cb):
with timeutils.StopWatch() as timer:
# TODO(mriedem): The "block_migration" parameter passed
+8 -8
View File
@@ -7682,7 +7682,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
bdms = [root_bdm]
events = [('volume-reimaged', root_bdm.volume_id)]
image_size_gb = 1
deadline = CONF.reimage_timeout_per_gb * image_size_gb
timeout = CONF.reimage_timeout_per_gb * image_size_gb
with test.nested(
mock.patch.object(objects.Instance, 'save',
@@ -7715,7 +7715,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_get_root_bdm.assert_called_once_with(
self.context, instance, bdms)
wait_inst_event.assert_called_once_with(
instance, events, deadline=deadline,
instance, events, timeout=timeout,
error_callback=self.compute._reimage_failed_callback)
@mock.patch('nova.volume.cinder.API.attachment_delete')
@@ -8630,7 +8630,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance, arq_uuids)
mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
self.instance, arq_events, timeout=mock.ANY)
mock_exit_wait_early.assert_called_once_with(arq_events)
mock_get_arqs.assert_has_calls([
@@ -8660,7 +8660,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance, arq_uuids=None)
mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
self.instance, arq_events, timeout=mock.ANY)
mock_exit_wait_early.assert_called_once_with(arq_events)
mock_get_arqs.assert_has_calls([
@@ -8692,7 +8692,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance, arq_uuids)
mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
self.instance, arq_events, timeout=mock.ANY)
mock_exit_wait_early.assert_not_called()
self.assertEqual(sorted(ret_arqs), sorted(arq_list))
mock_get_arqs.assert_has_calls([
@@ -8723,7 +8723,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance, arq_uuids)
mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
self.instance, arq_events, timeout=mock.ANY)
mock_exit_wait_early.assert_not_called()
mock_get_arqs.assert_not_called()
@@ -8752,7 +8752,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.context, self.instance, arq_uuids)
mock_wait_inst_ev.assert_called_once_with(
self.instance, arq_events, deadline=mock.ANY)
self.instance, arq_events, timeout=mock.ANY)
mock_exit_wait_early.assert_not_called()
mock_get_arqs.assert_called_once_with(
self.instance.uuid, only_resolved=True)
@@ -12048,7 +12048,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
migrate_data)
self.assertEqual(2, len(wait_for_event.call_args[0][1]))
self.assertEqual(CONF.vif_plugging_timeout,
wait_for_event.call_args[1]['deadline'])
wait_for_event.call_args[1]['timeout'])
mock_pre_live_mig.assert_called_once_with(
self.context, self.instance, None, None, 'dest-host',
migrate_data)
+1 -1
View File
@@ -690,7 +690,7 @@ class FakeDriver(driver.ComputeDriver):
class FakeVirtAPI(virtapi.VirtAPI):
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
def wait_for_instance_event(self, instance, event_names, timeout=300,
error_callback=None):
# NOTE(danms): Don't actually wait for any events, just
# fall through
+1 -1
View File
@@ -8351,7 +8351,7 @@ class LibvirtDriver(driver.ComputeDriver):
pause = bool(events)
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
instance, events, timeout=timeout,
error_callback=self._neutron_failed_callback,
):
self.plug_vifs(instance, network_info)
+1 -1
View File
@@ -17,7 +17,7 @@ import contextlib
class VirtAPI(object):
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
def wait_for_instance_event(self, instance, event_names, timeout=300,
error_callback=None):
raise NotImplementedError()
+1 -1
View File
@@ -286,7 +286,7 @@ class ZVMDriver(driver.ComputeDriver):
try:
event = self._get_neutron_event(network_info)
with self.virtapi.wait_for_instance_event(
instance, event, deadline=timeout,
instance, event, timeout=timeout,
error_callback=self._neutron_failed_callback):
self._setup_network(vm_name, os_distro, network_info, instance)
except exception.InstanceEventTimeout: