Replace utils.spawn_n with spawn
As [1] switched over the implementation of spawn and spawn_n to the same futurist Executor.submit, we can now replace all spawn_n usage with spawn and drop spawn_n from nova.utils. [1] I3494660e1aaa1db46f9f08494cb5817ec7020cc5 Change-Id: I0027f119c0fbe8d5298307324eaf30c5e9e152d3 Signed-off-by: Balazs Gibizer <gibi@redhat.com>
This commit is contained in:
@@ -2401,12 +2401,12 @@ class ComputeManager(manager.Manager):
|
||||
# NOTE(danms): We spawn here to return the RPC worker thread back to
|
||||
# the pool. Since what follows could take a really long time, we don't
|
||||
# want to tie up RPC workers.
|
||||
utils.spawn_n(_locked_do_build_and_run_instance,
|
||||
context, instance, image, request_spec,
|
||||
filter_properties, admin_password, injected_files,
|
||||
requested_networks, security_groups,
|
||||
block_device_mapping, node, limits, host_list,
|
||||
accel_uuids)
|
||||
utils.spawn(_locked_do_build_and_run_instance,
|
||||
context, instance, image, request_spec,
|
||||
filter_properties, admin_password, injected_files,
|
||||
requested_networks, security_groups,
|
||||
block_device_mapping, node, limits, host_list,
|
||||
accel_uuids)
|
||||
|
||||
def _check_device_tagging(self, requested_networks, block_device_mapping):
|
||||
tagging_requested = False
|
||||
@@ -10983,7 +10983,7 @@ class ComputeManager(manager.Manager):
|
||||
else:
|
||||
LOG.debug('Triggering sync for uuid %s', uuid)
|
||||
self._syncs_in_progress[uuid] = True
|
||||
nova.utils.pass_context(self._sync_power_pool.spawn_n,
|
||||
nova.utils.pass_context(self._sync_power_pool.spawn,
|
||||
_sync,
|
||||
db_instance)
|
||||
|
||||
|
||||
@@ -2144,7 +2144,7 @@ class ComputeTaskManager:
|
||||
skipped_host(target_ctxt, host, image_ids)
|
||||
continue
|
||||
|
||||
utils.pass_context(fetch_pool.spawn_n, wrap_cache_images,
|
||||
utils.pass_context(fetch_pool.spawn, wrap_cache_images,
|
||||
target_ctxt, host, image_ids)
|
||||
|
||||
# Wait until all those things finish
|
||||
|
||||
@@ -575,7 +575,7 @@ def check_greenthread_spawns(logical_line, filename):
|
||||
|
||||
N340
|
||||
"""
|
||||
msg = ("N340: Use nova.utils.%(spawn)s() rather than "
|
||||
msg = ("N340: Use nova.utils.spawn() rather than "
|
||||
"greenthread.%(spawn)s() and eventlet.%(spawn)s()")
|
||||
if "nova/utils.py" in filename or "nova/tests/" in filename:
|
||||
return
|
||||
|
||||
@@ -470,7 +470,7 @@ class HostManager(object):
|
||||
LOG.debug("END:_async_init_instance_info")
|
||||
|
||||
# Run this async so that we don't block the scheduler start-up
|
||||
utils.spawn_n(_async_init_instance_info, computes_by_cell)
|
||||
utils.spawn(_async_init_instance_info, computes_by_cell)
|
||||
|
||||
def _choose_host_filters(self, filter_cls_names):
|
||||
"""Since the caller may specify which filters to use we need
|
||||
|
||||
+1
-1
@@ -326,7 +326,7 @@ class TestCase(base.BaseTestCase):
|
||||
# all other tests.
|
||||
scheduler_utils.reset_globals()
|
||||
|
||||
# Wait for bare greenlets spawn_n()'ed from a GreenThreadPoolExecutor
|
||||
# Wait for bare greenlets spawn()'ed from a GreenThreadPoolExecutor
|
||||
# to finish before moving on from the test. When greenlets from a
|
||||
# previous test remain running, they may attempt to access structures
|
||||
# (like the database) that have already been torn down and can cause
|
||||
|
||||
Vendored
+4
-37
@@ -1210,8 +1210,8 @@ class IsolatedGreenPoolFixture(fixtures.Fixture):
|
||||
self.greenpool = origi_default_green_pool()
|
||||
self.greenpool.name = f"{self.test_case_id}.default"
|
||||
return self.greenpool
|
||||
# NOTE(sean-k-mooney): greenpools use eventlet.spawn and
|
||||
# eventlet.spawn_n so we can't stub out all calls to those functions.
|
||||
# NOTE(sean-k-mooney): greenpools use eventlet.spawn so we can't stub
|
||||
# out all calls to those functions.
|
||||
# Instead since nova only creates greenthreads directly via nova.utils
|
||||
# we stub out the default green pool. This will not capture
|
||||
# Greenthreads created via the standard lib threading module.
|
||||
@@ -1289,7 +1289,7 @@ class IsolatedGreenPoolFixture(fixtures.Fixture):
|
||||
'finished.'
|
||||
'They cannot be killed so they may interact with '
|
||||
'other tests if they raise exceptions. '
|
||||
'These greenlets were likely created by spawn_n and'
|
||||
'These greenlets were likely created by spawn and'
|
||||
'and therefore are not expected to return or raise.'
|
||||
)
|
||||
|
||||
@@ -1323,8 +1323,6 @@ class SpawnIsSynchronousFixture(fixtures.Fixture):
|
||||
|
||||
def setUp(self):
|
||||
super(SpawnIsSynchronousFixture, self).setUp()
|
||||
self.useFixture(fixtures.MonkeyPatch(
|
||||
'nova.utils.spawn_n', _FakeFuture))
|
||||
self.useFixture(fixtures.MonkeyPatch(
|
||||
'nova.utils.spawn', _FakeFuture))
|
||||
|
||||
@@ -1865,7 +1863,7 @@ class PropagateTestCaseIdToChildEventlets(fixtures.Fixture):
|
||||
# propagation
|
||||
caller = eventlet.getcurrent()
|
||||
# If there is no id set on us that means we were spawned with other
|
||||
# than nova.utils.spawn or spawn_n so the id propagation chain got
|
||||
# than nova.utils.spawn so the id propagation chain got
|
||||
# broken. We fall back to self.test_case_id from the fixture which
|
||||
# is good enough
|
||||
caller_test_case_id = getattr(
|
||||
@@ -1888,37 +1886,6 @@ class PropagateTestCaseIdToChildEventlets(fixtures.Fixture):
|
||||
self.useFixture(
|
||||
fixtures.MonkeyPatch('nova.utils.spawn', wrapped_spawn))
|
||||
|
||||
# now do the same with spawn_n
|
||||
orig_spawn_n = utils.spawn_n
|
||||
|
||||
def wrapped_spawn_n(func, *args, **kwargs):
|
||||
# This is still runs before the eventlet.spawn so read the id for
|
||||
# propagation
|
||||
caller = eventlet.getcurrent()
|
||||
# If there is no id set on us that means we were spawned with other
|
||||
# than nova.utils.spawn or spawn_n so the id propagation chain got
|
||||
# broken. We fall back to self.test_case_id from the fixture which
|
||||
# is good enough
|
||||
caller_test_case_id = getattr(
|
||||
caller, 'test_case_id', None) or self.test_case_id
|
||||
|
||||
@functools.wraps(func)
|
||||
def test_case_id_wrapper(*args, **kwargs):
|
||||
# This runs after the eventlet.spawn in the new child.
|
||||
# Propagate the id from our caller eventlet
|
||||
current = eventlet.getcurrent()
|
||||
current.test_case_id = caller_test_case_id
|
||||
return func(*args, **kwargs)
|
||||
|
||||
# call the original spawn_n to create the child but with our
|
||||
# new wrapper around its target
|
||||
return orig_spawn_n(test_case_id_wrapper, *args, **kwargs)
|
||||
|
||||
# let's replace nova.utils.spawn_n with the wrapped one that injects
|
||||
# our initialization to the child eventlet
|
||||
self.useFixture(
|
||||
fixtures.MonkeyPatch('nova.utils.spawn_n', wrapped_spawn_n))
|
||||
|
||||
|
||||
class ReaderWriterLock(lockutils.ReaderWriterLock):
|
||||
"""Wrap oslo.concurrency lockutils.ReaderWriterLock to support eventlet.
|
||||
|
||||
@@ -20,5 +20,5 @@ class SyncPool(eventlet.GreenPool):
|
||||
waits.
|
||||
"""
|
||||
|
||||
def spawn_n(self, func, *args, **kwargs):
|
||||
def spawn(self, func, *args, **kwargs):
|
||||
func(*args, **kwargs)
|
||||
|
||||
@@ -4070,7 +4070,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
|
||||
instance = mock.Mock()
|
||||
mock_get.return_value = [instance]
|
||||
with mock.patch.object(self.compute._sync_power_pool,
|
||||
'spawn_n') as mock_spawn:
|
||||
'spawn') as mock_spawn:
|
||||
self.compute._sync_power_states(mock.sentinel.context)
|
||||
mock_get.assert_called_with(mock.sentinel.context,
|
||||
self.compute.host, expected_attrs=[],
|
||||
|
||||
@@ -278,16 +278,16 @@ class TestIndirectionAPIFixture(testtools.TestCase):
|
||||
|
||||
class TestSpawnIsSynchronousFixture(testtools.TestCase):
|
||||
def test_spawn_patch(self):
|
||||
orig_spawn = utils.spawn_n
|
||||
orig_spawn = utils.spawn
|
||||
|
||||
fix = fixtures.SpawnIsSynchronousFixture()
|
||||
self.useFixture(fix)
|
||||
self.assertNotEqual(orig_spawn, utils.spawn_n)
|
||||
self.assertNotEqual(orig_spawn, utils.spawn)
|
||||
|
||||
def test_spawn_passes_through(self):
|
||||
self.useFixture(fixtures.SpawnIsSynchronousFixture())
|
||||
tester = mock.MagicMock()
|
||||
utils.spawn_n(tester.function, 'foo', bar='bar')
|
||||
utils.spawn(tester.function, 'foo', bar='bar')
|
||||
tester.function.assert_called_once_with('foo', bar='bar')
|
||||
|
||||
def test_spawn_return_has_result(self):
|
||||
|
||||
@@ -450,9 +450,6 @@ class HackingTestCase(test.NoDBTestCase):
|
||||
code = "nova.utils.spawn(func, arg1, kwarg1=kwarg1)"
|
||||
self._assert_has_no_errors(code, checks.check_greenthread_spawns)
|
||||
|
||||
code = "nova.utils.spawn_n(func, arg1, kwarg1=kwarg1)"
|
||||
self._assert_has_no_errors(code, checks.check_greenthread_spawns)
|
||||
|
||||
def test_config_option_regex_match(self):
|
||||
def should_match(code):
|
||||
self.assertTrue(checks.cfg_opt_re.match(code))
|
||||
|
||||
@@ -755,13 +755,12 @@ class SafeTruncateTestCase(test.NoDBTestCase):
|
||||
self.assertEqual(254, len(byte_message))
|
||||
|
||||
|
||||
class SpawnNTestCase(test.NoDBTestCase):
|
||||
class SpawnTestCase(test.NoDBTestCase):
|
||||
def setUp(self):
|
||||
super(SpawnNTestCase, self).setUp()
|
||||
super(SpawnTestCase, self).setUp()
|
||||
self.useFixture(context_fixture.ClearRequestContext())
|
||||
self.spawn_name = 'spawn_n'
|
||||
|
||||
def test_spawn_n_no_context(self):
|
||||
def test_spawn_no_context(self):
|
||||
self.assertIsNone(common_context.get_current())
|
||||
|
||||
def _fake_spawn(func, *args, **kwargs):
|
||||
@@ -773,10 +772,10 @@ class SpawnNTestCase(test.NoDBTestCase):
|
||||
pass
|
||||
pool = utils._get_default_green_pool()
|
||||
with mock.patch.object(pool, "submit", _fake_spawn):
|
||||
getattr(utils, self.spawn_name)(fake, 'test')
|
||||
getattr(utils, "spawn")(fake, 'test')
|
||||
self.assertIsNone(common_context.get_current())
|
||||
|
||||
def test_spawn_n_context(self):
|
||||
def test_spawn_context(self):
|
||||
self.assertIsNone(common_context.get_current())
|
||||
ctxt = context.RequestContext('user', 'project')
|
||||
|
||||
@@ -791,10 +790,10 @@ class SpawnNTestCase(test.NoDBTestCase):
|
||||
|
||||
pool = utils._get_default_green_pool()
|
||||
with mock.patch.object(pool, "submit", _fake_spawn):
|
||||
getattr(utils, self.spawn_name)(fake, ctxt, kwarg1='test')
|
||||
getattr(utils, "spawn")(fake, ctxt, kwarg1='test')
|
||||
self.assertEqual(ctxt, common_context.get_current())
|
||||
|
||||
def test_spawn_n_context_different_from_passed(self):
|
||||
def test_spawn_context_different_from_passed(self):
|
||||
self.assertIsNone(common_context.get_current())
|
||||
ctxt = context.RequestContext('user', 'project')
|
||||
ctxt_passed = context.RequestContext('user', 'project',
|
||||
@@ -812,16 +811,10 @@ class SpawnNTestCase(test.NoDBTestCase):
|
||||
|
||||
pool = utils._get_default_green_pool()
|
||||
with mock.patch.object(pool, "submit", _fake_spawn):
|
||||
getattr(utils, self.spawn_name)(fake, ctxt_passed, kwarg1='test')
|
||||
getattr(utils, "spawn")(fake, ctxt_passed, kwarg1='test')
|
||||
self.assertEqual(ctxt, common_context.get_current())
|
||||
|
||||
|
||||
class SpawnTestCase(SpawnNTestCase):
|
||||
def setUp(self):
|
||||
super(SpawnTestCase, self).setUp()
|
||||
self.spawn_name = 'spawn'
|
||||
|
||||
|
||||
class UT8TestCase(test.NoDBTestCase):
|
||||
def test_none_value(self):
|
||||
self.assertIsInstance(utils.utf8(None), type(None))
|
||||
|
||||
@@ -2606,7 +2606,7 @@ class IronicDriverSyncTestCase(IronicDriverTestCase):
|
||||
def setUp(self):
|
||||
super(IronicDriverSyncTestCase, self).setUp()
|
||||
self.driver.node_cache = {}
|
||||
# Since the code we're testing runs in a spawn_n green thread, ensure
|
||||
# Since the code we're testing runs in a spawn green thread, ensure
|
||||
# that the thread completes.
|
||||
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
|
||||
|
||||
|
||||
@@ -723,23 +723,6 @@ def spawn(func, *args, **kwargs) -> futurist.Future:
|
||||
_get_default_green_pool().submit, func, *args, **kwargs)
|
||||
|
||||
|
||||
def spawn_n(func, *args, **kwargs):
|
||||
"""Passthrough method for eventlet.greenpool.spawn.
|
||||
|
||||
This utility exists so that it can be stubbed for testing without
|
||||
interfering with the service spawns.
|
||||
|
||||
It will also grab the context from the threadlocal store and add it to
|
||||
the store on the new thread. This allows for continuity in logging the
|
||||
context when using this method to spawn a new thread.
|
||||
|
||||
Note that this is not different from calling spawn. Both spawn and
|
||||
spawn_n uses the eventlet.spawn.
|
||||
"""
|
||||
|
||||
spawn(func, *args, **kwargs)
|
||||
|
||||
|
||||
def tpool_execute(func, *args, **kwargs):
|
||||
"""Run func in a native thread"""
|
||||
return pass_context(tpool.execute, func, *args, **kwargs)
|
||||
|
||||
Reference in New Issue
Block a user