diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py index 8880472965..bc6a2f68ae 100644 --- a/nova/tests/functional/compute/test_resource_tracker.py +++ b/nova/tests/functional/compute/test_resource_tracker.py @@ -22,9 +22,9 @@ from nova.compute import vm_states from nova import conf from nova import context from nova import objects +from nova import test from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers -from nova.tests.functional import test_report_client as test_base from nova.virt import driver as virt_driver @@ -35,7 +35,7 @@ DISK_GB = orc.DISK_GB COMPUTE_HOST = 'compute-host' -class IronicResourceTrackerTest(test_base.SchedulerReportClientTestBase): +class IronicResourceTrackerTest(test.TestCase): """Tests the behaviour of the resource tracker with regards to the transitional period between adding support for custom resource classes in the placement API and integrating inventory and allocation records for diff --git a/nova/tests/functional/test_report_client.py b/nova/tests/functional/test_report_client.py index c151fca2c1..b48d9a3d1f 100644 --- a/nova/tests/functional/test_report_client.py +++ b/nova/tests/functional/test_report_client.py @@ -17,12 +17,8 @@ from keystoneauth1 import exceptions as kse import mock import os_resource_classes as orc import os_traits as ot -from oslo_config import cfg -from oslo_config import fixture as config_fixture from oslo_utils.fixture import uuidsentinel as uuids import pkg_resources -from placement import direct -from placement.tests.functional.fixtures import placement from nova.cmd import status from nova.compute import provider_tree @@ -38,6 +34,7 @@ from nova import objects from nova.scheduler.client import report from nova.scheduler import utils from nova import test +from nova.tests.functional import fixtures as func_fixtures CONF = conf.CONF @@ -86,68 +83,11 @@ class VersionCheckingReportClient(report.SchedulerReportClient): return super(VersionCheckingReportClient, self).delete(*args, **kwargs) -class SchedulerReportClientTestBase(test.TestCase): - - def setUp(self): - super(SchedulerReportClientTestBase, self).setUp() - # Because these tests use PlacementDirect we need to manage - # the database and other config ourselves. - config = cfg.ConfigOpts() - placement_conf = self.useFixture(config_fixture.Config(config)) - self.useFixture( - placement.PlacementFixture(conf_fixture=placement_conf, db=True, - use_intercept=False)) - self.placement_conf = placement_conf.conf - - def _interceptor(self, app=None, latest_microversion=True): - """Set up an intercepted placement API to test against. - - Use as e.g. - - with interceptor() as client: - ret = client.get_provider_tree_and_ensure_root(...) - - :param app: An optional wsgi app loader. - :param latest_microversion: If True (the default), API requests will - use the latest microversion if not - otherwise specified. If False, the base - microversion is the default. - :return: Context manager, which in turn returns a direct - SchedulerReportClient. - """ - class ReportClientInterceptor(direct.PlacementDirect): - """A shim around PlacementDirect that wraps the Adapter in a - SchedulerReportClient. - """ - def __enter__(inner_self): - adap = super(ReportClientInterceptor, inner_self).__enter__() - client = VersionCheckingReportClient(adapter=adap) - # NOTE(efried): This `self` is the TestCase! 
- self._set_client(client) - return client - - interceptor = ReportClientInterceptor( - self.placement_conf, latest_microversion=latest_microversion) - if app: - interceptor.app = app - return interceptor - - def _set_client(self, client): - """Set report client attributes on the TestCase instance. - - Override this to do things like: - self.mocked_thingy.report_client = client - - :param client: A direct SchedulerReportClient. - """ - pass - - @ddt.ddt @mock.patch('nova.compute.utils.is_volume_backed_instance', new=mock.Mock(return_value=False)) @mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock()) -class SchedulerReportClientTests(SchedulerReportClientTestBase): +class SchedulerReportClientTests(test.TestCase): def setUp(self): super(SchedulerReportClientTests, self).setUp() @@ -176,9 +116,11 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase): extra_specs={})) self.context = context.get_admin_context() - def _set_client(self, client): - # TODO(efried): Rip this out and just use `as client` throughout. - self.client = client + # The ksa adapter used by the PlacementFixture, for mocking purposes. + self.placement_client = self.useFixture( + func_fixtures.PlacementFixture())._client + + self.client = VersionCheckingReportClient() def compute_node_to_inventory_dict(self): result = {} @@ -219,138 +161,135 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase): # tests that when allocation or inventory errors happen, we # are resilient. res_class = orc.VCPU - with self._interceptor(): - # When we start out there are no resource providers. - rp = self.client._get_resource_provider(self.context, - self.compute_uuid) - self.assertIsNone(rp) - rps = self.client.get_providers_in_tree(self.context, - self.compute_uuid) - self.assertEqual([], rps) - # But get_provider_tree_and_ensure_root creates one (via - # _ensure_resource_provider) - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) - # Now let's update status for our compute node. - self.client._ensure_resource_provider( - self.context, self.compute_uuid, name=self.compute_name) - self.client.set_inventory_for_provider( - self.context, self.compute_uuid, - self.compute_node_to_inventory_dict()) + # When we start out there are no resource providers. + rp = self.client._get_resource_provider(self.context, + self.compute_uuid) + self.assertIsNone(rp) + rps = self.client.get_providers_in_tree(self.context, + self.compute_uuid) + self.assertEqual([], rps) + # But get_provider_tree_and_ensure_root creates one (via + # _ensure_resource_provider) + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) - # So now we have a resource provider - rp = self.client._get_resource_provider(self.context, - self.compute_uuid) - self.assertIsNotNone(rp) - rps = self.client.get_providers_in_tree(self.context, - self.compute_uuid) - self.assertEqual(1, len(rps)) + # Now let's update status for our compute node. 
+ self.client._ensure_resource_provider( + self.context, self.compute_uuid, name=self.compute_name) + self.client.set_inventory_for_provider( + self.context, self.compute_uuid, + self.compute_node_to_inventory_dict()) - # We should also have empty sets of aggregate and trait - # associations - self.assertEqual( - [], self.client._get_sharing_providers(self.context, - [uuids.agg])) - self.assertFalse( - self.client._provider_tree.have_aggregates_changed( - self.compute_uuid, [])) - self.assertFalse( - self.client._provider_tree.have_traits_changed( - self.compute_uuid, [])) + # So now we have a resource provider + rp = self.client._get_resource_provider(self.context, + self.compute_uuid) + self.assertIsNotNone(rp) + rps = self.client.get_providers_in_tree(self.context, + self.compute_uuid) + self.assertEqual(1, len(rps)) - # TODO(cdent): change this to use the methods built in - # to the report client to retrieve inventory? - inventory_url = ('/resource_providers/%s/inventories' % - self.compute_uuid) - resp = self.client.get(inventory_url) - inventory_data = resp.json()['inventories'] - self.assertEqual(self.compute_node.vcpus, - inventory_data[res_class]['total']) + # We should also have empty sets of aggregate and trait + # associations + self.assertEqual( + [], self.client._get_sharing_providers(self.context, + [uuids.agg])) + self.assertFalse( + self.client._provider_tree.have_aggregates_changed( + self.compute_uuid, [])) + self.assertFalse( + self.client._provider_tree.have_traits_changed( + self.compute_uuid, [])) - # Providers and inventory show up nicely in the provider tree - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) - self.assertTrue(ptree.has_inventory(self.compute_uuid)) + # TODO(cdent): change this to use the methods built in + # to the report client to retrieve inventory? 
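        # A hedged sketch of what that TODO could look like, reusing the
        # client's own private helper that this module already calls in
        # test_set_inventory_for_provider (assuming it stays suitable here):
        #
        #     inv = self.client._get_inventory(
        #         self.context, self.compute_uuid)['inventories']
        #     self.assertEqual(self.compute_node.vcpus, inv[res_class]['total'])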
+ inventory_url = ('/resource_providers/%s/inventories' % + self.compute_uuid) + resp = self.client.get(inventory_url) + inventory_data = resp.json()['inventories'] + self.assertEqual(self.compute_node.vcpus, + inventory_data[res_class]['total']) - # Update allocations with our instance - alloc_dict = utils.resources_from_flavor(self.instance, - self.instance.flavor) - payload = { - "allocations": { - self.compute_uuid: {"resources": alloc_dict} - }, - "project_id": self.instance.project_id, - "user_id": self.instance.user_id, - "consumer_generation": None - } - self.client.put_allocations( - self.context, self.instance_uuid, payload) + # Providers and inventory show up nicely in the provider tree + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) + self.assertTrue(ptree.has_inventory(self.compute_uuid)) - # Check that allocations were made - resp = self.client.get('/allocations/%s' % self.instance_uuid) - alloc_data = resp.json()['allocations'] - vcpu_data = alloc_data[self.compute_uuid]['resources'][res_class] - self.assertEqual(2, vcpu_data) + # Update allocations with our instance + alloc_dict = utils.resources_from_flavor(self.instance, + self.instance.flavor) + payload = { + "allocations": { + self.compute_uuid: {"resources": alloc_dict} + }, + "project_id": self.instance.project_id, + "user_id": self.instance.user_id, + "consumer_generation": None + } + self.client.put_allocations( + self.context, self.instance_uuid, payload) - # Check that usages are up to date - resp = self.client.get('/resource_providers/%s/usages' % - self.compute_uuid) - usage_data = resp.json()['usages'] - vcpu_data = usage_data[res_class] - self.assertEqual(2, vcpu_data) + # Check that allocations were made + resp = self.client.get('/allocations/%s' % self.instance_uuid) + alloc_data = resp.json()['allocations'] + vcpu_data = alloc_data[self.compute_uuid]['resources'][res_class] + self.assertEqual(2, vcpu_data) - # Delete allocations with our instance - self.client.delete_allocation_for_instance(self.context, - self.instance.uuid) + # Check that usages are up to date + resp = self.client.get('/resource_providers/%s/usages' % + self.compute_uuid) + usage_data = resp.json()['usages'] + vcpu_data = usage_data[res_class] + self.assertEqual(2, vcpu_data) - # No usage - resp = self.client.get('/resource_providers/%s/usages' % - self.compute_uuid) - usage_data = resp.json()['usages'] - vcpu_data = usage_data[res_class] - self.assertEqual(0, vcpu_data) + # Delete allocations with our instance + self.client.delete_allocation_for_instance(self.context, + self.instance.uuid) - # Allocation bumped the generation, so refresh to get the latest - self.client._refresh_and_get_inventory(self.context, - self.compute_uuid) + # No usage + resp = self.client.get('/resource_providers/%s/usages' % + self.compute_uuid) + usage_data = resp.json()['usages'] + vcpu_data = usage_data[res_class] + self.assertEqual(0, vcpu_data) - # Trigger the reporting client deleting all inventory by setting - # the compute node's CPU, RAM and disk amounts to 0. 
- self.compute_node.vcpus = 0 - self.compute_node.memory_mb = 0 - self.compute_node.local_gb = 0 - self.client.set_inventory_for_provider( - self.context, self.compute_uuid, - self.compute_node_to_inventory_dict()) + # Allocation bumped the generation, so refresh to get the latest + self.client._refresh_and_get_inventory(self.context, + self.compute_uuid) - # Check there's no more inventory records - resp = self.client.get(inventory_url) - inventory_data = resp.json()['inventories'] - self.assertEqual({}, inventory_data) + # Trigger the reporting client deleting all inventory by setting + # the compute node's CPU, RAM and disk amounts to 0. + self.compute_node.vcpus = 0 + self.compute_node.memory_mb = 0 + self.compute_node.local_gb = 0 + self.client.set_inventory_for_provider( + self.context, self.compute_uuid, + self.compute_node_to_inventory_dict()) - # Build the provider tree afresh. - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - # The compute node is still there - self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) - # But the inventory is gone - self.assertFalse(ptree.has_inventory(self.compute_uuid)) + # Check there's no more inventory records + resp = self.client.get(inventory_url) + inventory_data = resp.json()['inventories'] + self.assertEqual({}, inventory_data) + + # Build the provider tree afresh. + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + # The compute node is still there + self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) + # But the inventory is gone + self.assertFalse(ptree.has_inventory(self.compute_uuid)) def test_global_request_id(self): global_request_id = 'req-%s' % uuids.global_request_id - def assert_app(environ, start_response): - # Assert the 'X-Openstack-Request-Id' header in the request. 
- self.assertIn('HTTP_X_OPENSTACK_REQUEST_ID', environ) + def fake_request(*args, **kwargs): self.assertEqual(global_request_id, - environ['HTTP_X_OPENSTACK_REQUEST_ID']) - start_response('204 OK', []) - return [] + kwargs['headers']['X-OpenStack-Request-ID']) - with self._interceptor(app=lambda: assert_app): + with mock.patch.object(self.client._client, 'request', + side_effect=fake_request): self.client._delete_provider(self.compute_uuid, global_request_id=global_request_id) payload = { @@ -377,409 +316,407 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase): sip(IP) x """ - with self._interceptor(): - # Register the compute node and its inventory + # Register the compute node and its inventory + self.client._ensure_resource_provider( + self.context, self.compute_uuid, name=self.compute_name) + self.client.set_inventory_for_provider( + self.context, self.compute_uuid, + self.compute_node_to_inventory_dict()) + # The compute node is associated with two of the shared storages + self.client.set_aggregates_for_provider( + self.context, self.compute_uuid, + set([uuids.agg_disk_1, uuids.agg_disk_2])) + + # Register two SR-IOV PFs with VF and bandwidth inventory + for x in (1, 2): + name = 'pf%d' % x + uuid = getattr(uuids, name) self.client._ensure_resource_provider( - self.context, self.compute_uuid, name=self.compute_name) + self.context, uuid, name=name, + parent_provider_uuid=self.compute_uuid) self.client.set_inventory_for_provider( - self.context, self.compute_uuid, - self.compute_node_to_inventory_dict()) - # The compute node is associated with two of the shared storages - self.client.set_aggregates_for_provider( - self.context, self.compute_uuid, - set([uuids.agg_disk_1, uuids.agg_disk_2])) - - # Register two SR-IOV PFs with VF and bandwidth inventory - for x in (1, 2): - name = 'pf%d' % x - uuid = getattr(uuids, name) - self.client._ensure_resource_provider( - self.context, uuid, name=name, - parent_provider_uuid=self.compute_uuid) - self.client.set_inventory_for_provider( - self.context, uuid, { - orc.SRIOV_NET_VF: { - 'total': 24 * x, - 'reserved': x, - 'min_unit': 1, - 'max_unit': 24 * x, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - 'CUSTOM_BANDWIDTH': { - 'total': 125000 * x, - 'reserved': 1000 * x, - 'min_unit': 5000, - 'max_unit': 25000 * x, - 'step_size': 5000, - 'allocation_ratio': 1.0, - }, - }) - # They're associated with an IP address aggregate - self.client.set_aggregates_for_provider(self.context, uuid, - [uuids.agg_ip]) - # Set some traits on 'em - self.client.set_traits_for_provider( - self.context, uuid, ['CUSTOM_PHYSNET_%d' % x]) - - # Register three shared storage pools with disk inventory - for x in (1, 2, 3): - name = 'ss%d' % x - uuid = getattr(uuids, name) - self.client._ensure_resource_provider(self.context, uuid, - name=name) - self.client.set_inventory_for_provider( - self.context, uuid, { - orc.DISK_GB: { - 'total': 100 * x, - 'reserved': x, - 'min_unit': 1, - 'max_unit': 10 * x, - 'step_size': 2, - 'allocation_ratio': 10.0, - }, - }) - # Mark as a sharing provider - self.client.set_traits_for_provider( - self.context, uuid, ['MISC_SHARES_VIA_AGGREGATE']) - # Associate each with its own aggregate. The compute node is - # associated with the first two (agg_disk_1 and agg_disk_2). 
- agg = getattr(uuids, 'agg_disk_%d' % x) - self.client.set_aggregates_for_provider(self.context, uuid, - [agg]) - - # Register a shared IP address provider with IP address inventory - self.client._ensure_resource_provider(self.context, uuids.sip, - name='sip') - self.client.set_inventory_for_provider( - self.context, uuids.sip, { - orc.IPV4_ADDRESS: { - 'total': 128, - 'reserved': 0, + self.context, uuid, { + orc.SRIOV_NET_VF: { + 'total': 24 * x, + 'reserved': x, 'min_unit': 1, - 'max_unit': 8, + 'max_unit': 24 * x, 'step_size': 1, 'allocation_ratio': 1.0, }, - }) - # Mark as a sharing provider, and add another trait - self.client.set_traits_for_provider( - self.context, uuids.sip, - set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO'])) - # It's associated with the same aggregate as both PFs - self.client.set_aggregates_for_provider(self.context, uuids.sip, - [uuids.agg_ip]) - - # Register a shared network bandwidth provider - self.client._ensure_resource_provider(self.context, uuids.sbw, - name='sbw') - self.client.set_inventory_for_provider( - self.context, uuids.sbw, { 'CUSTOM_BANDWIDTH': { - 'total': 1250000, - 'reserved': 10000, + 'total': 125000 * x, + 'reserved': 1000 * x, 'min_unit': 5000, - 'max_unit': 250000, + 'max_unit': 25000 * x, 'step_size': 5000, - 'allocation_ratio': 8.0, + 'allocation_ratio': 1.0, + }, + }) + # They're associated with an IP address aggregate + self.client.set_aggregates_for_provider(self.context, uuid, + [uuids.agg_ip]) + # Set some traits on 'em + self.client.set_traits_for_provider( + self.context, uuid, ['CUSTOM_PHYSNET_%d' % x]) + + # Register three shared storage pools with disk inventory + for x in (1, 2, 3): + name = 'ss%d' % x + uuid = getattr(uuids, name) + self.client._ensure_resource_provider(self.context, uuid, + name=name) + self.client.set_inventory_for_provider( + self.context, uuid, { + orc.DISK_GB: { + 'total': 100 * x, + 'reserved': x, + 'min_unit': 1, + 'max_unit': 10 * x, + 'step_size': 2, + 'allocation_ratio': 10.0, }, }) # Mark as a sharing provider self.client.set_traits_for_provider( - self.context, uuids.sbw, ['MISC_SHARES_VIA_AGGREGATE']) - # It's associated with some other aggregate. - self.client.set_aggregates_for_provider(self.context, uuids.sbw, - [uuids.agg_bw]) + self.context, uuid, ['MISC_SHARES_VIA_AGGREGATE']) + # Associate each with its own aggregate. The compute node is + # associated with the first two (agg_disk_1 and agg_disk_2). + agg = getattr(uuids, 'agg_disk_%d' % x) + self.client.set_aggregates_for_provider(self.context, uuid, + [agg]) - # Setup is done. 
Grab the ProviderTree - prov_tree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) + # Register a shared IP address provider with IP address inventory + self.client._ensure_resource_provider(self.context, uuids.sip, + name='sip') + self.client.set_inventory_for_provider( + self.context, uuids.sip, { + orc.IPV4_ADDRESS: { + 'total': 128, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 8, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + }) + # Mark as a sharing provider, and add another trait + self.client.set_traits_for_provider( + self.context, uuids.sip, + set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO'])) + # It's associated with the same aggregate as both PFs + self.client.set_aggregates_for_provider(self.context, uuids.sip, + [uuids.agg_ip]) - # All providers show up because we used _ensure_resource_provider - self.assertEqual(set([self.compute_uuid, uuids.ss1, uuids.ss2, - uuids.pf1, uuids.pf2, uuids.sip, uuids.ss3, - uuids.sbw]), - set(prov_tree.get_provider_uuids())) - # Narrow the field to just our compute subtree. - self.assertEqual( - set([self.compute_uuid, uuids.pf1, uuids.pf2]), - set(prov_tree.get_provider_uuids(self.compute_uuid))) + # Register a shared network bandwidth provider + self.client._ensure_resource_provider(self.context, uuids.sbw, + name='sbw') + self.client.set_inventory_for_provider( + self.context, uuids.sbw, { + 'CUSTOM_BANDWIDTH': { + 'total': 1250000, + 'reserved': 10000, + 'min_unit': 5000, + 'max_unit': 250000, + 'step_size': 5000, + 'allocation_ratio': 8.0, + }, + }) + # Mark as a sharing provider + self.client.set_traits_for_provider( + self.context, uuids.sbw, ['MISC_SHARES_VIA_AGGREGATE']) + # It's associated with some other aggregate. + self.client.set_aggregates_for_provider(self.context, uuids.sbw, + [uuids.agg_bw]) - # Validate traits for a couple of providers - self.assertFalse(prov_tree.have_traits_changed( - uuids.pf2, ['CUSTOM_PHYSNET_2'])) - self.assertFalse(prov_tree.have_traits_changed( - uuids.sip, ['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO'])) + # Setup is done. Grab the ProviderTree + prov_tree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) - # Validate aggregates for a couple of providers - self.assertFalse(prov_tree.have_aggregates_changed( - uuids.sbw, [uuids.agg_bw])) - self.assertFalse(prov_tree.have_aggregates_changed( - self.compute_uuid, [uuids.agg_disk_1, uuids.agg_disk_2])) + # All providers show up because we used _ensure_resource_provider + self.assertEqual(set([self.compute_uuid, uuids.ss1, uuids.ss2, + uuids.pf1, uuids.pf2, uuids.sip, uuids.ss3, + uuids.sbw]), + set(prov_tree.get_provider_uuids())) + # Narrow the field to just our compute subtree. 
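        # Passing a root UUID to get_provider_uuids() limits the result to that
        # provider's own tree, so the sharing providers (ss1/ss2/ss3, sip, sbw),
        # which are separate roots, drop out; roughly:
        #
        #     prov_tree.get_provider_uuids()                   # all eight providers
        #     prov_tree.get_provider_uuids(self.compute_uuid)  # compute node, pf1, pf2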
+ self.assertEqual( + set([self.compute_uuid, uuids.pf1, uuids.pf2]), + set(prov_tree.get_provider_uuids(self.compute_uuid))) + + # Validate traits for a couple of providers + self.assertFalse(prov_tree.have_traits_changed( + uuids.pf2, ['CUSTOM_PHYSNET_2'])) + self.assertFalse(prov_tree.have_traits_changed( + uuids.sip, ['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO'])) + + # Validate aggregates for a couple of providers + self.assertFalse(prov_tree.have_aggregates_changed( + uuids.sbw, [uuids.agg_bw])) + self.assertFalse(prov_tree.have_aggregates_changed( + self.compute_uuid, [uuids.agg_disk_1, uuids.agg_disk_2])) def test__set_inventory_reserved_eq_total(self): - with self._interceptor(latest_microversion=False): - # Create the provider - self.client._ensure_resource_provider(self.context, uuids.cn) + # Create the provider + self.client._ensure_resource_provider(self.context, uuids.cn) - # Make sure we can set reserved value equal to total - inv = { - orc.SRIOV_NET_VF: { - 'total': 24, - 'reserved': 24, - 'min_unit': 1, - 'max_unit': 24, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - } - self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - - def test_set_inventory_for_provider(self): - """Tests for SchedulerReportClient.set_inventory_for_provider. - """ - with self._interceptor(): - inv = { - orc.SRIOV_NET_VF: { - 'total': 24, - 'reserved': 1, - 'min_unit': 1, - 'max_unit': 24, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - } - # Provider doesn't exist in our cache - self.assertRaises( - ValueError, - self.client.set_inventory_for_provider, - self.context, uuids.cn, inv) - self.assertIsNone(self.client._get_inventory( - self.context, uuids.cn)) - - # Create the provider - self.client._ensure_resource_provider(self.context, uuids.cn) - # Still no inventory, but now we don't get a 404 - self.assertEqual( - {}, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - - # Now set the inventory - self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - - # Make sure we can change it - inv = { - orc.SRIOV_NET_VF: { - 'total': 24, - 'reserved': 1, - 'min_unit': 1, - 'max_unit': 24, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - orc.IPV4_ADDRESS: { - 'total': 128, - 'reserved': 0, - 'min_unit': 1, - 'max_unit': 8, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - } - self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - - # Create custom resource classes on the fly - self.assertFalse( - self.client.get('/resource_classes/CUSTOM_BANDWIDTH')) - inv = { - orc.SRIOV_NET_VF: { - 'total': 24, - 'reserved': 1, - 'min_unit': 1, - 'max_unit': 24, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - orc.IPV4_ADDRESS: { - 'total': 128, - 'reserved': 0, - 'min_unit': 1, - 'max_unit': 8, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - 'CUSTOM_BANDWIDTH': { - 'total': 1250000, - 'reserved': 10000, - 'min_unit': 5000, - 'max_unit': 250000, - 'step_size': 5000, - 'allocation_ratio': 8.0, - }, - } - self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - # The custom resource class got created. 
- self.assertTrue( - self.client.get('/resource_classes/CUSTOM_BANDWIDTH')) - - # Creating a bogus resource class raises the appropriate exception. - bogus_inv = dict(inv) - bogus_inv['CUSTOM_BOGU$$'] = { - 'total': 1, - 'reserved': 1, + # Make sure we can set reserved value equal to total + inv = { + orc.SRIOV_NET_VF: { + 'total': 24, + 'reserved': 24, 'min_unit': 1, - 'max_unit': 1, + 'max_unit': 24, 'step_size': 1, 'allocation_ratio': 1.0, - } - self.assertRaises( - exception.InvalidResourceClass, - self.client.set_inventory_for_provider, - self.context, uuids.cn, bogus_inv) - self.assertFalse( - self.client.get('/resource_classes/BOGUS')) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) + }, + } + self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) - # Create a generation conflict by doing an "out of band" update - oob_inv = { - orc.IPV4_ADDRESS: { - 'total': 128, - 'reserved': 0, - 'min_unit': 1, - 'max_unit': 8, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - } - gen = self.client._provider_tree.data(uuids.cn).generation - self.assertTrue( - self.client.put( - '/resource_providers/%s/inventories' % uuids.cn, - {'resource_provider_generation': gen, - 'inventories': oob_inv})) - self.assertEqual( - oob_inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) + def test_set_inventory_for_provider(self): + """Tests for SchedulerReportClient.set_inventory_for_provider.""" + inv = { + orc.SRIOV_NET_VF: { + 'total': 24, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 24, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + } + # Provider doesn't exist in our cache + self.assertRaises( + ValueError, + self.client.set_inventory_for_provider, + self.context, uuids.cn, inv) + self.assertIsNone(self.client._get_inventory( + self.context, uuids.cn)) - # Now try to update again. - inv = { - orc.SRIOV_NET_VF: { - 'total': 24, - 'reserved': 1, - 'min_unit': 1, - 'max_unit': 24, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - 'CUSTOM_BANDWIDTH': { - 'total': 1250000, - 'reserved': 10000, - 'min_unit': 5000, - 'max_unit': 250000, - 'step_size': 5000, - 'allocation_ratio': 8.0, - }, - } - # Cached generation is off, so this will bounce with a conflict. - self.assertRaises( - exception.ResourceProviderUpdateConflict, - self.client.set_inventory_for_provider, - self.context, uuids.cn, inv) - # Inventory still corresponds to the out-of-band update - self.assertEqual( - oob_inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - # Force refresh to get the latest generation - self.client._refresh_and_get_inventory(self.context, uuids.cn) - # Now the update should work - self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - payload = { - "allocations": { - uuids.cn: {"resources": {orc.SRIOV_NET_VF: 1}} - }, - "project_id": uuids.proj, - "user_id": uuids.user, - "consumer_generation": None - } - # Now set up an InventoryInUse case by creating a VF allocation... 
- self.assertTrue( - self.client.put_allocations( - self.context, uuids.consumer, payload)) - # ...and trying to delete the provider's VF inventory - bad_inv = { - 'CUSTOM_BANDWIDTH': { - 'total': 1250000, - 'reserved': 10000, - 'min_unit': 5000, - 'max_unit': 250000, - 'step_size': 5000, - 'allocation_ratio': 8.0, - }, - } - # Allocation bumped the generation, so refresh to get the latest - self.client._refresh_and_get_inventory(self.context, uuids.cn) - msgre = (".*update conflict: Inventory for 'SRIOV_NET_VF' on " - "resource provider '%s' in use..*" % uuids.cn) - with self.assertRaisesRegex(exception.InventoryInUse, msgre): - self.client.set_inventory_for_provider(self.context, uuids.cn, - bad_inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) + # Create the provider + self.client._ensure_resource_provider(self.context, uuids.cn) + # Still no inventory, but now we don't get a 404 + self.assertEqual( + {}, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) - # Same result if we try to clear all the inventory - bad_inv = {} - with self.assertRaisesRegex(exception.InventoryInUse, msgre): - self.client.set_inventory_for_provider(self.context, uuids.cn, - bad_inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) + # Now set the inventory + self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) - # Remove the allocation to make it work - self.client.delete('/allocations/' + uuids.consumer) - # Force refresh to get the latest generation - self.client._refresh_and_get_inventory(self.context, uuids.cn) - inv = {} - self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) + # Make sure we can change it + inv = { + orc.SRIOV_NET_VF: { + 'total': 24, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 24, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + orc.IPV4_ADDRESS: { + 'total': 128, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 8, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + } + self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Create custom resource classes on the fly + self.assertFalse( + self.client.get('/resource_classes/CUSTOM_BANDWIDTH', + version='1.2')) + inv = { + orc.SRIOV_NET_VF: { + 'total': 24, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 24, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + orc.IPV4_ADDRESS: { + 'total': 128, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 8, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + 'CUSTOM_BANDWIDTH': { + 'total': 1250000, + 'reserved': 10000, + 'min_unit': 5000, + 'max_unit': 250000, + 'step_size': 5000, + 'allocation_ratio': 8.0, + }, + } + self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + # The custom resource class got created. + self.assertTrue( + self.client.get('/resource_classes/CUSTOM_BANDWIDTH', + version='1.2')) + + # Creating a bogus resource class raises the appropriate exception. 
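        # The explicit version='1.2' above is presumably needed because the removed
        # interceptor defaulted to the latest placement microversion, whereas the
        # plain report client sends no microversion unless asked, and the
        # /resource_classes API only exists from microversion 1.2 onward.
        # 'CUSTOM_BOGU$$' below should fail validation because custom resource
        # class names are limited to CUSTOM_ plus uppercase letters, digits and
        # underscores; a rough sketch of the pattern placement enforces:
        #
        #     CUSTOM_RC_RE = re.compile(r'^CUSTOM_[A-Z0-9_]+$')
        #     self.assertIsNone(CUSTOM_RC_RE.match('CUSTOM_BOGU$$'))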
+ bogus_inv = dict(inv) + bogus_inv['CUSTOM_BOGU$$'] = { + 'total': 1, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 1, + 'step_size': 1, + 'allocation_ratio': 1.0, + } + self.assertRaises( + exception.InvalidResourceClass, + self.client.set_inventory_for_provider, + self.context, uuids.cn, bogus_inv) + self.assertFalse( + self.client.get('/resource_classes/BOGUS')) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Create a generation conflict by doing an "out of band" update + oob_inv = { + orc.IPV4_ADDRESS: { + 'total': 128, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 8, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + } + gen = self.client._provider_tree.data(uuids.cn).generation + self.assertTrue( + self.client.put( + '/resource_providers/%s/inventories' % uuids.cn, + {'resource_provider_generation': gen, + 'inventories': oob_inv})) + self.assertEqual( + oob_inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Now try to update again. + inv = { + orc.SRIOV_NET_VF: { + 'total': 24, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 24, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + 'CUSTOM_BANDWIDTH': { + 'total': 1250000, + 'reserved': 10000, + 'min_unit': 5000, + 'max_unit': 250000, + 'step_size': 5000, + 'allocation_ratio': 8.0, + }, + } + # Cached generation is off, so this will bounce with a conflict. + self.assertRaises( + exception.ResourceProviderUpdateConflict, + self.client.set_inventory_for_provider, + self.context, uuids.cn, inv) + # Inventory still corresponds to the out-of-band update + self.assertEqual( + oob_inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + # Force refresh to get the latest generation + self.client._refresh_and_get_inventory(self.context, uuids.cn) + # Now the update should work + self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + payload = { + "allocations": { + uuids.cn: {"resources": {orc.SRIOV_NET_VF: 1}} + }, + "project_id": uuids.proj, + "user_id": uuids.user, + "consumer_generation": None + } + # Now set up an InventoryInUse case by creating a VF allocation... 
+ self.assertTrue( + self.client.put_allocations( + self.context, uuids.consumer, payload)) + # ...and trying to delete the provider's VF inventory + bad_inv = { + 'CUSTOM_BANDWIDTH': { + 'total': 1250000, + 'reserved': 10000, + 'min_unit': 5000, + 'max_unit': 250000, + 'step_size': 5000, + 'allocation_ratio': 8.0, + }, + } + # Allocation bumped the generation, so refresh to get the latest + self.client._refresh_and_get_inventory(self.context, uuids.cn) + msgre = (".*update conflict: Inventory for 'SRIOV_NET_VF' on " + "resource provider '%s' in use..*" % uuids.cn) + with self.assertRaisesRegex(exception.InventoryInUse, msgre): + self.client.set_inventory_for_provider(self.context, uuids.cn, + bad_inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Same result if we try to clear all the inventory + bad_inv = {} + with self.assertRaisesRegex(exception.InventoryInUse, msgre): + self.client.set_inventory_for_provider(self.context, uuids.cn, + bad_inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Remove the allocation to make it work + self.client.delete('/allocations/' + uuids.consumer) + # Force refresh to get the latest generation + self.client._refresh_and_get_inventory(self.context, uuids.cn) + inv = {} + self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) def test_update_from_provider_tree(self): """A "realistic" walk through the lifecycle of a compute node provider @@ -804,205 +741,206 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase): self.assertFalse( new_tree.have_aggregates_changed(uuid, cdata.aggregates)) - # Do these with a failing interceptor to prove no API calls are made. - with self._interceptor(app=lambda: 'nuke') as client: + # Do these with a failing request method to prove no API calls are made + with mock.patch.object(self.placement_client, 'request', + mock.NonCallableMock()): # To begin with, the cache should be empty - self.assertEqual([], client._provider_tree.get_provider_uuids()) + self.assertEqual( + [], self.client._provider_tree.get_provider_uuids()) # When new_tree is empty, it's a no-op. 
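            # mock.NonCallableMock raises TypeError as soon as it is called, so the
            # patch on the fixture adapter's request() above guarantees these cache
            # checks and the empty-tree no-op never actually reach the placement
            # API; a minimal illustration of the behaviour relied on:
            #
            #     nc = mock.NonCallableMock()
            #     nc()  # TypeError: 'NonCallableMock' object is not callable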
- client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() - - with self._interceptor(): - # Populate with a provider with no inventories, aggregates, traits - new_tree.new_root('root', uuids.root) self.client.update_from_provider_tree(self.context, new_tree) assert_ptrees_equal() - # Throw in some more providers, in various spots in the tree, with - # some sub-properties - new_tree.new_child('child1', uuids.root, uuid=uuids.child1) - new_tree.update_aggregates('child1', [uuids.agg1, uuids.agg2]) - new_tree.new_child('grandchild1_1', uuids.child1, uuid=uuids.gc1_1) - new_tree.update_traits(uuids.gc1_1, ['CUSTOM_PHYSNET_2']) - new_tree.new_root('ssp', uuids.ssp) - new_tree.update_inventory('ssp', { - orc.DISK_GB: { - 'total': 100, - 'reserved': 1, - 'min_unit': 1, - 'max_unit': 10, - 'step_size': 2, - 'allocation_ratio': 10.0, - }, - }) - self.client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() + # Populate with a provider with no inventories, aggregates, traits + new_tree.new_root('root', uuids.root) + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() - # Swizzle properties - # Give the root some everything - new_tree.update_inventory(uuids.root, { - orc.VCPU: { - 'total': 10, - 'reserved': 0, - 'min_unit': 1, - 'max_unit': 2, - 'step_size': 1, - 'allocation_ratio': 10.0, - }, - orc.MEMORY_MB: { - 'total': 1048576, - 'reserved': 2048, - 'min_unit': 1024, - 'max_unit': 131072, - 'step_size': 1024, - 'allocation_ratio': 1.0, - }, - }) - new_tree.update_aggregates(uuids.root, [uuids.agg1]) - new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', - 'HW_CPU_X86_AVX2']) - # Take away the child's aggregates - new_tree.update_aggregates(uuids.child1, []) - # Grandchild gets some inventory - ipv4_inv = { - orc.IPV4_ADDRESS: { - 'total': 128, - 'reserved': 0, - 'min_unit': 1, - 'max_unit': 8, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - } - new_tree.update_inventory('grandchild1_1', ipv4_inv) - # Shared storage provider gets traits - new_tree.update_traits('ssp', set(['MISC_SHARES_VIA_AGGREGATE', - 'STORAGE_DISK_SSD'])) - self.client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() + # Throw in some more providers, in various spots in the tree, with + # some sub-properties + new_tree.new_child('child1', uuids.root, uuid=uuids.child1) + new_tree.update_aggregates('child1', [uuids.agg1, uuids.agg2]) + new_tree.new_child('grandchild1_1', uuids.child1, uuid=uuids.gc1_1) + new_tree.update_traits(uuids.gc1_1, ['CUSTOM_PHYSNET_2']) + new_tree.new_root('ssp', uuids.ssp) + new_tree.update_inventory('ssp', { + orc.DISK_GB: { + 'total': 100, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 10, + 'step_size': 2, + 'allocation_ratio': 10.0, + }, + }) + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() - # Let's go for some error scenarios. 
- # Add inventory in an invalid resource class - new_tree.update_inventory( - 'grandchild1_1', - dict(ipv4_inv, - MOTSUC_BANDWIDTH={ - 'total': 1250000, - 'reserved': 10000, - 'min_unit': 5000, - 'max_unit': 250000, - 'step_size': 5000, - 'allocation_ratio': 8.0, - })) + # Swizzle properties + # Give the root some everything + new_tree.update_inventory(uuids.root, { + orc.VCPU: { + 'total': 10, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 2, + 'step_size': 1, + 'allocation_ratio': 10.0, + }, + orc.MEMORY_MB: { + 'total': 1048576, + 'reserved': 2048, + 'min_unit': 1024, + 'max_unit': 131072, + 'step_size': 1024, + 'allocation_ratio': 1.0, + }, + }) + new_tree.update_aggregates(uuids.root, [uuids.agg1]) + new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', + 'HW_CPU_X86_AVX2']) + # Take away the child's aggregates + new_tree.update_aggregates(uuids.child1, []) + # Grandchild gets some inventory + ipv4_inv = { + orc.IPV4_ADDRESS: { + 'total': 128, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 8, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + } + new_tree.update_inventory('grandchild1_1', ipv4_inv) + # Shared storage provider gets traits + new_tree.update_traits('ssp', set(['MISC_SHARES_VIA_AGGREGATE', + 'STORAGE_DISK_SSD'])) + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() + + # Let's go for some error scenarios. + # Add inventory in an invalid resource class + new_tree.update_inventory( + 'grandchild1_1', + dict(ipv4_inv, + MOTSUC_BANDWIDTH={ + 'total': 1250000, + 'reserved': 10000, + 'min_unit': 5000, + 'max_unit': 250000, + 'step_size': 5000, + 'allocation_ratio': 8.0, + })) + self.assertRaises( + exception.ResourceProviderSyncFailed, + self.client.update_from_provider_tree, self.context, new_tree) + # The inventory update didn't get synced. 
+ self.assertIsNone(self.client._get_inventory( + self.context, uuids.grandchild1_1)) + # We invalidated the cache for the entire tree around grandchild1_1 + # but did not invalidate the other root (the SSP) + self.assertEqual([uuids.ssp], + self.client._provider_tree.get_provider_uuids()) + # This is a little under-the-hood-looking, but make sure we cleared + # the association refresh timers for everything in the grandchild's + # tree + self.assertEqual(set([uuids.ssp]), + set(self.client._association_refresh_time)) + + # Fix that problem so we can try the next one + new_tree.update_inventory( + 'grandchild1_1', + dict(ipv4_inv, + CUSTOM_BANDWIDTH={ + 'total': 1250000, + 'reserved': 10000, + 'min_unit': 5000, + 'max_unit': 250000, + 'step_size': 5000, + 'allocation_ratio': 8.0, + })) + + # Add a bogus trait + new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', + 'HW_CPU_X86_AVX2', + 'MOTSUC_FOO']) + self.assertRaises( + exception.ResourceProviderSyncFailed, + self.client.update_from_provider_tree, self.context, new_tree) + # Placement didn't get updated + self.assertEqual(set(['HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2']), + self.client.get_provider_traits( + self.context, uuids.root).traits) + # ...and the root was removed from the cache + self.assertFalse(self.client._provider_tree.exists(uuids.root)) + + # Fix that problem + new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', + 'HW_CPU_X86_AVX2', + 'CUSTOM_FOO']) + + # Now the sync should work + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() + + # Let's cause a conflict error by doing an "out-of-band" update + gen = self.client._provider_tree.data(uuids.ssp).generation + self.assertTrue(self.client.put( + '/resource_providers/%s/traits' % uuids.ssp, + {'resource_provider_generation': gen, + 'traits': ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_HDD']}, + version='1.6')) + + # Now if we try to modify the traits, we should fail and invalidate + # the cache... + new_tree.update_traits(uuids.ssp, ['MISC_SHARES_VIA_AGGREGATE', + 'STORAGE_DISK_SSD', + 'CUSTOM_FAST']) + self.assertRaises( + exception.ResourceProviderUpdateConflict, + self.client.update_from_provider_tree, self.context, new_tree) + # ...but the next iteration will refresh the cache with the latest + # generation and so the next attempt should succeed. + self.client.update_from_provider_tree(self.context, new_tree) + # The out-of-band change is blown away, as it should be. + assert_ptrees_equal() + + # Let's delete some stuff + new_tree.remove(uuids.ssp) + self.assertFalse(new_tree.exists('ssp')) + + # Verify that placement communication failure raises through + with mock.patch.object(self.client, '_delete_provider', + side_effect=kse.EndpointNotFound): self.assertRaises( - exception.ResourceProviderSyncFailed, - self.client.update_from_provider_tree, self.context, new_tree) - # The inventory update didn't get synced. 
- self.assertIsNone(self.client._get_inventory( - self.context, uuids.grandchild1_1)) - # We invalidated the cache for the entire tree around grandchild1_1 - # but did not invalidate the other root (the SSP) - self.assertEqual([uuids.ssp], - self.client._provider_tree.get_provider_uuids()) - # This is a little under-the-hood-looking, but make sure we cleared - # the association refresh timers for everything in the grandchild's - # tree - self.assertEqual(set([uuids.ssp]), - set(self.client._association_refresh_time)) + kse.ClientException, + self.client.update_from_provider_tree, + self.context, new_tree) + # The provider didn't get deleted (this doesn't raise + # ResourceProviderNotFound) + self.client.get_provider_by_name(self.context, 'ssp') - # Fix that problem so we can try the next one - new_tree.update_inventory( - 'grandchild1_1', - dict(ipv4_inv, - CUSTOM_BANDWIDTH={ - 'total': 1250000, - 'reserved': 10000, - 'min_unit': 5000, - 'max_unit': 250000, - 'step_size': 5000, - 'allocation_ratio': 8.0, - })) + # Continue removing stuff + new_tree.remove('child1') + self.assertFalse(new_tree.exists('child1')) + # Removing a node removes its descendants too + self.assertFalse(new_tree.exists('grandchild1_1')) + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() - # Add a bogus trait - new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', - 'HW_CPU_X86_AVX2', - 'MOTSUC_FOO']) - self.assertRaises( - exception.ResourceProviderSyncFailed, - self.client.update_from_provider_tree, self.context, new_tree) - # Placement didn't get updated - self.assertEqual(set(['HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2']), - self.client.get_provider_traits( - self.context, uuids.root).traits) - # ...and the root was removed from the cache - self.assertFalse(self.client._provider_tree.exists(uuids.root)) + # Remove the last provider + new_tree.remove(uuids.root) + self.assertEqual([], new_tree.get_provider_uuids()) + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() - # Fix that problem - new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', - 'HW_CPU_X86_AVX2', - 'CUSTOM_FOO']) - - # Now the sync should work - self.client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() - - # Let's cause a conflict error by doing an "out-of-band" update - gen = self.client._provider_tree.data(uuids.ssp).generation - self.assertTrue(self.client.put( - '/resource_providers/%s/traits' % uuids.ssp, - {'resource_provider_generation': gen, - 'traits': ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_HDD']}, - version='1.6')) - - # Now if we try to modify the traits, we should fail and invalidate - # the cache... - new_tree.update_traits(uuids.ssp, ['MISC_SHARES_VIA_AGGREGATE', - 'STORAGE_DISK_SSD', - 'CUSTOM_FAST']) - self.assertRaises( - exception.ResourceProviderUpdateConflict, - self.client.update_from_provider_tree, self.context, new_tree) - # ...but the next iteration will refresh the cache with the latest - # generation and so the next attempt should succeed. - self.client.update_from_provider_tree(self.context, new_tree) - # The out-of-band change is blown away, as it should be. 
- assert_ptrees_equal() - - # Let's delete some stuff - new_tree.remove(uuids.ssp) - self.assertFalse(new_tree.exists('ssp')) - - # Verify that placement communication failure raises through - with mock.patch.object(self.client, '_delete_provider', - side_effect=kse.EndpointNotFound): - self.assertRaises( - kse.ClientException, - self.client.update_from_provider_tree, - self.context, new_tree) - # The provider didn't get deleted (this doesn't raise - # ResourceProviderNotFound) - self.client.get_provider_by_name(self.context, 'ssp') - - # Continue removing stuff - new_tree.remove('child1') - self.assertFalse(new_tree.exists('child1')) - # Removing a node removes its descendants too - self.assertFalse(new_tree.exists('grandchild1_1')) - self.client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() - - # Remove the last provider - new_tree.remove(uuids.root) - self.assertEqual([], new_tree.get_provider_uuids()) - self.client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() - - # Having removed the providers this way, they ought to be gone - # from placement - for uuid in (uuids.root, uuids.child1, uuids.grandchild1_1, - uuids.ssp): - resp = self.client.get('/resource_providers/%s' % uuid) - self.assertEqual(404, resp.status_code) + # Having removed the providers this way, they ought to be gone + # from placement + for uuid in (uuids.root, uuids.child1, uuids.grandchild1_1, + uuids.ssp): + resp = self.client.get('/resource_providers/%s' % uuid) + self.assertEqual(404, resp.status_code) def test_non_tree_aggregate_membership(self): """There are some methods of the reportclient that interact with the @@ -1014,63 +952,61 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase): sure it never gets populated (and we don't raise ValueError). """ agg_uuid = uuids.agg - with self._interceptor(): - # get_provider_tree_and_ensure_root creates a resource provider - # record for us - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid, name=self.compute_name) - self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) - # Now blow away the cache so we can ensure the use_cache=False - # behavior of aggregate_{add|remove}_host correctly ignores and/or - # doesn't attempt to populate/update it. - self.client._provider_tree.remove(self.compute_uuid) - self.assertEqual( - [], self.client._provider_tree.get_provider_uuids()) + # get_provider_tree_and_ensure_root creates a resource provider + # record for us + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid, name=self.compute_name) + self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) + # Now blow away the cache so we can ensure the use_cache=False + # behavior of aggregate_{add|remove}_host correctly ignores and/or + # doesn't attempt to populate/update it. 
+ self.client._provider_tree.remove(self.compute_uuid) + self.assertEqual( + [], self.client._provider_tree.get_provider_uuids()) - # Use the reportclient's _get_provider_aggregates() private method - # to verify no aggregates are yet associated with this provider - aggs = self.client._get_provider_aggregates( - self.context, self.compute_uuid).aggregates - self.assertEqual(set(), aggs) + # Use the reportclient's _get_provider_aggregates() private method + # to verify no aggregates are yet associated with this provider + aggs = self.client._get_provider_aggregates( + self.context, self.compute_uuid).aggregates + self.assertEqual(set(), aggs) - # Now associate the compute **host name** with an aggregate and - # ensure the aggregate association is saved properly - self.client.aggregate_add_host( - self.context, agg_uuid, host_name=self.compute_name) + # Now associate the compute **host name** with an aggregate and + # ensure the aggregate association is saved properly + self.client.aggregate_add_host( + self.context, agg_uuid, host_name=self.compute_name) - # Check that the ProviderTree cache hasn't been modified (since - # the aggregate_add_host() method is only called from nova-api and - # we don't want to have a ProviderTree cache at that layer. - self.assertEqual( - [], self.client._provider_tree.get_provider_uuids()) - aggs = self.client._get_provider_aggregates( - self.context, self.compute_uuid).aggregates - self.assertEqual(set([agg_uuid]), aggs) + # Check that the ProviderTree cache hasn't been modified (since + # the aggregate_add_host() method is only called from nova-api and + # we don't want to have a ProviderTree cache at that layer. + self.assertEqual( + [], self.client._provider_tree.get_provider_uuids()) + aggs = self.client._get_provider_aggregates( + self.context, self.compute_uuid).aggregates + self.assertEqual(set([agg_uuid]), aggs) - # Finally, remove the association and verify it's removed in - # placement - self.client.aggregate_remove_host( - self.context, agg_uuid, self.compute_name) - self.assertEqual( - [], self.client._provider_tree.get_provider_uuids()) - aggs = self.client._get_provider_aggregates( - self.context, self.compute_uuid).aggregates - self.assertEqual(set(), aggs) + # Finally, remove the association and verify it's removed in + # placement + self.client.aggregate_remove_host( + self.context, agg_uuid, self.compute_name) + self.assertEqual( + [], self.client._provider_tree.get_provider_uuids()) + aggs = self.client._get_provider_aggregates( + self.context, self.compute_uuid).aggregates + self.assertEqual(set(), aggs) - # Try removing the same host and verify no error - self.client.aggregate_remove_host( - self.context, agg_uuid, self.compute_name) - self.assertEqual( - [], self.client._provider_tree.get_provider_uuids()) + # Try removing the same host and verify no error + self.client.aggregate_remove_host( + self.context, agg_uuid, self.compute_name) + self.assertEqual( + [], self.client._provider_tree.get_provider_uuids()) def test_alloc_cands_smoke(self): """Simple call to get_allocation_candidates for version checking.""" flavor = objects.Flavor( vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0) req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False) - with self._interceptor(): - self.client.get_allocation_candidates( - self.context, utils.ResourceRequest(req_spec)) + self.client.get_allocation_candidates( + self.context, utils.ResourceRequest(req_spec)) def _set_up_provider_tree(self): r"""Create two compute nodes in placement 
("this" one, and another one) @@ -1092,8 +1028,6 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase): | SRIOV_NET_VF=4| | SRIOV_NET_VF=4| |aggs: [uuids.agg1] | +-------------------+ +-------------------+ +-------------------+ - Must be invoked from within an _interceptor() context. - Returns a dict, keyed by provider UUID, of the expected shape of the provider tree, as expected by the expected_dict param of assertProviderTree. @@ -1210,10 +1144,7 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase): (k, uuid)) def _set_up_provider_tree_allocs(self): - """Create some allocations on our compute (with sharing). - - Must be invoked from within an _interceptor() context. - """ + """Create some allocations on our compute (with sharing).""" ret = { uuids.cn_inst1: { 'allocations': { @@ -1288,19 +1219,18 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase): 'resources_DISK:DISK_GB': 10 }) req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False) - with self._interceptor(): - self._set_up_provider_tree() - acs = self.client.get_allocation_candidates( - self.context, utils.ResourceRequest(req_spec))[0] - # We're not going to validate all the allocations - Placement has - # tests for that - just make sure they're there. - self.assertEqual(3, len(acs)) - # We're not going to validate all the mappings - Placement has - # tests for that - just make sure they're there. - for ac in acs: - self.assertIn('allocations', ac) - self.assertEqual({'_CPU', '_MEM', '_DISK'}, - set(ac['mappings'])) + self._set_up_provider_tree() + acs = self.client.get_allocation_candidates( + self.context, utils.ResourceRequest(req_spec))[0] + # We're not going to validate all the allocations - Placement has + # tests for that - just make sure they're there. + self.assertEqual(3, len(acs)) + # We're not going to validate all the mappings - Placement has + # tests for that - just make sure they're there. + for ac in acs: + self.assertIn('allocations', ac) + self.assertEqual({'_CPU', '_MEM', '_DISK'}, + set(ac['mappings'])) # One data element is: # root_required: set of traits for root_required @@ -1348,246 +1278,242 @@ class SchedulerReportClientTests(SchedulerReportClientTestBase): req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False) req_spec.root_required.update(data['root_required']) req_spec.root_forbidden.update(data['root_forbidden']) - with self._interceptor(): - self._set_up_provider_tree() - self.client.set_traits_for_provider( - self.context, self.compute_uuid, - (ot.COMPUTE_STATUS_DISABLED, ot.COMPUTE_VOLUME_EXTEND, - 'CUSTOM_FOO')) - acs, _, ver = self.client.get_allocation_candidates( - self.context, utils.ResourceRequest(req_spec)) - self.assertEqual('1.35', ver) - # This prints which ddt permutation we're using if it fails. - self.assertEqual(data['expected_acs'], len(acs), data) + self._set_up_provider_tree() + self.client.set_traits_for_provider( + self.context, self.compute_uuid, + (ot.COMPUTE_STATUS_DISABLED, ot.COMPUTE_VOLUME_EXTEND, + 'CUSTOM_FOO')) + acs, _, ver = self.client.get_allocation_candidates( + self.context, utils.ResourceRequest(req_spec)) + self.assertEqual('1.35', ver) + # This prints which ddt permutation we're using if it fails. + self.assertEqual(data['expected_acs'], len(acs), data) def test_get_allocations_for_provider_tree(self): - with self._interceptor(): - # When the provider tree cache is empty (or we otherwise supply a - # bogus node name), we get ValueError. 
- self.assertRaises(ValueError, - self.client.get_allocations_for_provider_tree, - self.context, 'bogus') + # When the provider tree cache is empty (or we otherwise supply a + # bogus node name), we get ValueError. + self.assertRaises(ValueError, + self.client.get_allocations_for_provider_tree, + self.context, 'bogus') - self._set_up_provider_tree() + self._set_up_provider_tree() - # At this point, there are no allocations - self.assertEqual({}, self.client.get_allocations_for_provider_tree( - self.context, self.compute_name)) + # At this point, there are no allocations + self.assertEqual({}, self.client.get_allocations_for_provider_tree( + self.context, self.compute_name)) - expected = self._set_up_provider_tree_allocs() + expected = self._set_up_provider_tree_allocs() - # And now we should get all the right allocations. Note that we see - # nothing from othercn_inst. - actual = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertAllocations(expected, actual) + # And now we should get all the right allocations. Note that we see + # nothing from othercn_inst. + actual = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertAllocations(expected, actual) def test_reshape(self): """Smoke test the report client shim for the reshaper API.""" - with self._interceptor(): - # Simulate placement API communication failure - with mock.patch.object( - self.client, 'post', side_effect=kse.MissingAuthPlugin): - self.assertRaises(kse.ClientException, - self.client._reshape, self.context, {}, {}) + # Simulate placement API communication failure + with mock.patch.object( + self.client, 'post', side_effect=kse.MissingAuthPlugin): + self.assertRaises(kse.ClientException, + self.client._reshape, self.context, {}, {}) - # Invalid payload (empty inventories) results in a 409, which the - # report client converts to ReshapeFailed - try: - self.client._reshape(self.context, {}, {}) - except exception.ReshapeFailed as e: - self.assertIn('JSON does not validate: {} does not have ' - 'enough properties', e.kwargs['error']) + # Invalid payload (empty inventories) results in a 409, which the + # report client converts to ReshapeFailed + try: + self.client._reshape(self.context, {}, {}) + except exception.ReshapeFailed as e: + self.assertIn('JSON does not validate: {} does not have ' + 'enough properties', e.kwargs['error']) - # Okay, do some real stuffs. We're just smoke-testing that we can - # hit a good path to the API here; real testing of the API happens - # in gabbits and via update_from_provider_tree. - self._set_up_provider_tree() - self._set_up_provider_tree_allocs() - # Updating allocations bumps generations for affected providers. - # In real life, the subsequent update_from_provider_tree will - # bounce 409, the cache will be cleared, and the operation will be - # retried. We don't care about any of that retry logic in the scope - # of this test case, so just clear the cache so - # get_provider_tree_and_ensure_root repopulates it and we avoid the - # conflict exception. - self.client.clear_provider_cache() + # Okay, do some real stuffs. We're just smoke-testing that we can + # hit a good path to the API here; real testing of the API happens + # in gabbits and via update_from_provider_tree. + self._set_up_provider_tree() + self._set_up_provider_tree_allocs() + # Updating allocations bumps generations for affected providers. 
+ # In real life, the subsequent update_from_provider_tree will + # bounce 409, the cache will be cleared, and the operation will be + # retried. We don't care about any of that retry logic in the scope + # of this test case, so just clear the cache so + # get_provider_tree_and_ensure_root repopulates it and we avoid the + # conflict exception. + self.client.clear_provider_cache() - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - inventories = {} - for rp_uuid in ptree.get_provider_uuids(): - data = ptree.data(rp_uuid) - # Add a new resource class to the inventories - inventories[rp_uuid] = { - "inventories": dict(data.inventory, - CUSTOM_FOO={'total': 10}), - "resource_provider_generation": data.generation - } + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + inventories = {} + for rp_uuid in ptree.get_provider_uuids(): + data = ptree.data(rp_uuid) + # Add a new resource class to the inventories + inventories[rp_uuid] = { + "inventories": dict(data.inventory, + CUSTOM_FOO={'total': 10}), + "resource_provider_generation": data.generation + } - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - for alloc in allocs.values(): - for res in alloc['allocations'].values(): - res['resources']['CUSTOM_FOO'] = 1 + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + for alloc in allocs.values(): + for res in alloc['allocations'].values(): + res['resources']['CUSTOM_FOO'] = 1 - resp = self.client._reshape(self.context, inventories, allocs) - self.assertEqual(204, resp.status_code) + resp = self.client._reshape(self.context, inventories, allocs) + self.assertEqual(204, resp.status_code) def test_update_from_provider_tree_reshape(self): """Run update_from_provider_tree with reshaping.""" - with self._interceptor(): - exp_ptree = self._set_up_provider_tree() - # Save a copy of this for later - orig_exp_ptree = copy.deepcopy(exp_ptree) + exp_ptree = self._set_up_provider_tree() + # Save a copy of this for later + orig_exp_ptree = copy.deepcopy(exp_ptree) - # A null reshape: no inv changes, empty allocs - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(exp_ptree, ptree) - self.assertAllocations({}, allocs) - self.client.update_from_provider_tree(self.context, ptree, - allocations=allocs) + # A null reshape: no inv changes, empty allocs + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(exp_ptree, ptree) + self.assertAllocations({}, allocs) + self.client.update_from_provider_tree(self.context, ptree, + allocations=allocs) - exp_allocs = self._set_up_provider_tree_allocs() - # Save a copy of this for later - orig_exp_allocs = copy.deepcopy(exp_allocs) - # Updating allocations bumps generations for affected providers. - # In real life, the subsequent update_from_provider_tree will - # bounce 409, the cache will be cleared, and the operation will be - # retried. We don't care about any of that retry logic in the scope - # of this test case, so just clear the cache so - # get_provider_tree_and_ensure_root repopulates it and we avoid the - # conflict exception. 
- self.client.clear_provider_cache() - # Another null reshape: no inv changes, no alloc changes - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(exp_ptree, ptree) - self.assertAllocations(exp_allocs, allocs) - self.client.update_from_provider_tree(self.context, ptree, - allocations=allocs) + exp_allocs = self._set_up_provider_tree_allocs() + # Save a copy of this for later + orig_exp_allocs = copy.deepcopy(exp_allocs) + # Updating allocations bumps generations for affected providers. + # In real life, the subsequent update_from_provider_tree will + # bounce 409, the cache will be cleared, and the operation will be + # retried. We don't care about any of that retry logic in the scope + # of this test case, so just clear the cache so + # get_provider_tree_and_ensure_root repopulates it and we avoid the + # conflict exception. + self.client.clear_provider_cache() + # Another null reshape: no inv changes, no alloc changes + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(exp_ptree, ptree) + self.assertAllocations(exp_allocs, allocs) + self.client.update_from_provider_tree(self.context, ptree, + allocations=allocs) - # Now a reshape that adds an inventory item to all the providers in - # the provider tree (i.e. the "local" ones and the shared one, but - # not the othercn); and an allocation of that resource only for the - # local instances, and only on providers that already have - # allocations (i.e. the compute node and sharing provider for both - # cn_inst*, and numa1 for cn_inst1 and numa2 for cn_inst2). - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(exp_ptree, ptree) - self.assertAllocations(exp_allocs, allocs) - for rp_uuid in ptree.get_provider_uuids(): - # Add a new resource class to the inventories - ptree.update_inventory( - rp_uuid, dict(ptree.data(rp_uuid).inventory, - CUSTOM_FOO={'total': 10})) - exp_ptree[rp_uuid]['inventory']['CUSTOM_FOO'] = { - 'total': 10} - for c_uuid, alloc in allocs.items(): - for rp_uuid, res in alloc['allocations'].items(): - res['resources']['CUSTOM_FOO'] = 1 - exp_allocs[c_uuid]['allocations'][rp_uuid][ - 'resources']['CUSTOM_FOO'] = 1 - self.client.update_from_provider_tree(self.context, ptree, - allocations=allocs) + # Now a reshape that adds an inventory item to all the providers in + # the provider tree (i.e. the "local" ones and the shared one, but + # not the othercn); and an allocation of that resource only for the + # local instances, and only on providers that already have + # allocations (i.e. the compute node and sharing provider for both + # cn_inst*, and numa1 for cn_inst1 and numa2 for cn_inst2). 
+ ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(exp_ptree, ptree) + self.assertAllocations(exp_allocs, allocs) + for rp_uuid in ptree.get_provider_uuids(): + # Add a new resource class to the inventories + ptree.update_inventory( + rp_uuid, dict(ptree.data(rp_uuid).inventory, + CUSTOM_FOO={'total': 10})) + exp_ptree[rp_uuid]['inventory']['CUSTOM_FOO'] = { + 'total': 10} + for c_uuid, alloc in allocs.items(): + for rp_uuid, res in alloc['allocations'].items(): + res['resources']['CUSTOM_FOO'] = 1 + exp_allocs[c_uuid]['allocations'][rp_uuid][ + 'resources']['CUSTOM_FOO'] = 1 + self.client.update_from_provider_tree(self.context, ptree, + allocations=allocs) - # Let's do a big transform that stuffs everything back onto the - # compute node - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(exp_ptree, ptree) - self.assertAllocations(exp_allocs, allocs) - cum_inv = {} - for rp_uuid in ptree.get_provider_uuids(): - # Accumulate all the inventory amounts for each RC - for rc, inv in ptree.data(rp_uuid).inventory.items(): - if rc not in cum_inv: - cum_inv[rc] = {'total': 0} - cum_inv[rc]['total'] += inv['total'] - # Remove all the providers except the compute node and the - # shared storage provider, which still has (and shall - # retain) allocations from the "other" compute node. - # TODO(efried): But is that right? I should be able to - # remove the SSP from *this* tree and have it continue to - # exist in the world. But how would ufpt distinguish? - if rp_uuid not in (self.compute_uuid, uuids.ssp): - ptree.remove(rp_uuid) - # Put the accumulated inventory onto the compute RP - ptree.update_inventory(self.compute_uuid, cum_inv) - # Cause trait and aggregate transformations too. - ptree.update_aggregates(self.compute_uuid, set()) - ptree.update_traits(self.compute_uuid, ['CUSTOM_ALL_IN_ONE']) - exp_ptree = { - self.compute_uuid: dict( - parent_uuid = None, - inventory = cum_inv, - aggregates=set(), - traits = set(['CUSTOM_ALL_IN_ONE']), - ), - uuids.ssp: dict( - # Don't really care about the details - parent_uuid=None, - ), - } + # Let's do a big transform that stuffs everything back onto the + # compute node + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(exp_ptree, ptree) + self.assertAllocations(exp_allocs, allocs) + cum_inv = {} + for rp_uuid in ptree.get_provider_uuids(): + # Accumulate all the inventory amounts for each RC + for rc, inv in ptree.data(rp_uuid).inventory.items(): + if rc not in cum_inv: + cum_inv[rc] = {'total': 0} + cum_inv[rc]['total'] += inv['total'] + # Remove all the providers except the compute node and the + # shared storage provider, which still has (and shall + # retain) allocations from the "other" compute node. + # TODO(efried): But is that right? I should be able to + # remove the SSP from *this* tree and have it continue to + # exist in the world. But how would ufpt distinguish? 
+ if rp_uuid not in (self.compute_uuid, uuids.ssp): + ptree.remove(rp_uuid) + # Put the accumulated inventory onto the compute RP + ptree.update_inventory(self.compute_uuid, cum_inv) + # Cause trait and aggregate transformations too. + ptree.update_aggregates(self.compute_uuid, set()) + ptree.update_traits(self.compute_uuid, ['CUSTOM_ALL_IN_ONE']) + exp_ptree = { + self.compute_uuid: dict( + parent_uuid = None, + inventory = cum_inv, + aggregates=set(), + traits = set(['CUSTOM_ALL_IN_ONE']), + ), + uuids.ssp: dict( + # Don't really care about the details + parent_uuid=None, + ), + } - # Let's inject an error path test here: attempting to reshape - # inventories without having moved their allocations should fail. - ex = self.assertRaises( - exception.ReshapeFailed, - self.client.update_from_provider_tree, self.context, ptree, - allocations=allocs) - self.assertIn('placement.inventory.inuse', ex.format_message()) + # Let's inject an error path test here: attempting to reshape + # inventories without having moved their allocations should fail. + ex = self.assertRaises( + exception.ReshapeFailed, + self.client.update_from_provider_tree, self.context, ptree, + allocations=allocs) + self.assertIn('placement.inventory.inuse', ex.format_message()) - # Move all the allocations off their existing providers and - # onto the compute node - for c_uuid, alloc in allocs.items(): - cum_allocs = {} - for rp_uuid, resources in alloc['allocations'].items(): - # Accumulate all the allocations for each RC - for rc, amount in resources['resources'].items(): - if rc not in cum_allocs: - cum_allocs[rc] = 0 - cum_allocs[rc] += amount - alloc['allocations'] = { - # Put the accumulated allocations on the compute RP - self.compute_uuid: {'resources': cum_allocs}} - exp_allocs = copy.deepcopy(allocs) - self.client.update_from_provider_tree(self.context, ptree, - allocations=allocs) + # Move all the allocations off their existing providers and + # onto the compute node + for c_uuid, alloc in allocs.items(): + cum_allocs = {} + for rp_uuid, resources in alloc['allocations'].items(): + # Accumulate all the allocations for each RC + for rc, amount in resources['resources'].items(): + if rc not in cum_allocs: + cum_allocs[rc] = 0 + cum_allocs[rc] += amount + alloc['allocations'] = { + # Put the accumulated allocations on the compute RP + self.compute_uuid: {'resources': cum_allocs}} + exp_allocs = copy.deepcopy(allocs) + self.client.update_from_provider_tree(self.context, ptree, + allocations=allocs) - # Okay, let's transform back now - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(exp_ptree, ptree) - self.assertAllocations(exp_allocs, allocs) - for rp_uuid, data in orig_exp_ptree.items(): - if not ptree.exists(rp_uuid): - # This should only happen for children, because the CN - # and SSP are already there. 
- ptree.new_child(data['name'], data['parent_uuid'], - uuid=rp_uuid) - ptree.update_inventory(rp_uuid, data['inventory']) - ptree.update_traits(rp_uuid, data['traits']) - ptree.update_aggregates(rp_uuid, data['aggregates']) - for c_uuid, orig_allocs in orig_exp_allocs.items(): - allocs[c_uuid]['allocations'] = orig_allocs['allocations'] - self.client.update_from_provider_tree(self.context, ptree, - allocations=allocs) - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(orig_exp_ptree, ptree) - self.assertAllocations(orig_exp_allocs, allocs) + # Okay, let's transform back now + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(exp_ptree, ptree) + self.assertAllocations(exp_allocs, allocs) + for rp_uuid, data in orig_exp_ptree.items(): + if not ptree.exists(rp_uuid): + # This should only happen for children, because the CN + # and SSP are already there. + ptree.new_child(data['name'], data['parent_uuid'], + uuid=rp_uuid) + ptree.update_inventory(rp_uuid, data['inventory']) + ptree.update_traits(rp_uuid, data['traits']) + ptree.update_aggregates(rp_uuid, data['aggregates']) + for c_uuid, orig_allocs in orig_exp_allocs.items(): + allocs[c_uuid]['allocations'] = orig_allocs['allocations'] + self.client.update_from_provider_tree(self.context, ptree, + allocations=allocs) + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(orig_exp_ptree, ptree) + self.assertAllocations(orig_exp_allocs, allocs)
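
For reference, with the interceptor context manager gone, the tests above call the report client directly against the placement fixture configured in setUp(). A minimal sketch of a new-style test in the same vein, exercising the aggregate association round trip; the method name and the aggregate uuid sentinel are invented, and the sketch assumes the same setUp() as the rest of this class:

    def test_aggregate_association_sketch(self):
        agg_uuid = uuids.example_agg
        # Create the provider, then associate its host name with an aggregate.
        self.client._ensure_resource_provider(
            self.context, self.compute_uuid, name=self.compute_name)
        self.client.aggregate_add_host(
            self.context, agg_uuid, host_name=self.compute_name)
        aggs = self.client._get_provider_aggregates(
            self.context, self.compute_uuid).aggregates
        self.assertEqual({agg_uuid}, aggs)
        # Removing the association is idempotent: a second call is a no-op.
        self.client.aggregate_remove_host(
            self.context, agg_uuid, self.compute_name)
        self.client.aggregate_remove_host(
            self.context, agg_uuid, self.compute_name)
        aggs = self.client._get_provider_aggregates(
            self.context, self.compute_uuid).aggregates
        self.assertEqual(set(), aggs)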
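The mappings assertion in the allocation-candidates test relies on suffixed resources_<SUFFIX>:<RC> extra specs producing one request group per suffix. A hedged sketch of that relationship as a standalone test method; only the DISK extra spec is taken from the patch, the CPU and MEM keys are assumed to follow the same pattern implied by the asserted '_CPU'/'_MEM' mappings, and the method name is invented:

    def test_granular_mappings_sketch(self):
        flavor = objects.Flavor(
            vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
            extra_specs={
                'resources_CPU:VCPU': 1,          # assumed key
                'resources_MEM:MEMORY_MB': 1024,  # assumed key
                'resources_DISK:DISK_GB': 10,
            })
        req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False)
        self._set_up_provider_tree()
        acs = self.client.get_allocation_candidates(
            self.context, utils.ResourceRequest(req_spec))[0]
        # Each suffixed request group shows up as its own key in the
        # candidate's mappings.
        for ac in acs:
            self.assertEqual({'_CPU', '_MEM', '_DISK'}, set(ac['mappings']))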
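test_reshape simulates a placement outage by patching the client's post method with a side_effect. A self-contained, stdlib-only illustration of that failure-simulation pattern; the class, method, and exception here are invented stand-ins, not nova code:

import unittest
from unittest import mock


class FakeClient:
    def post(self, url, payload):
        raise NotImplementedError  # real HTTP I/O would happen here

    def reshape(self, payload):
        # The caller simply forwards to post(); errors bubble up unchanged.
        return self.post('/reshaper', payload)


class ReshapeErrorPathTest(unittest.TestCase):
    def test_post_failure_bubbles_up(self):
        client = FakeClient()
        with mock.patch.object(client, 'post',
                               side_effect=ConnectionError('placement down')):
            self.assertRaises(ConnectionError, client.reshape, {})


if __name__ == '__main__':
    unittest.main()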
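The inventories and allocations arguments assembled for _reshape() in test_reshape follow the overall shape sketched below; the uuids, resource classes, and numbers are invented, and the real allocations (as returned by get_allocations_for_provider_tree) also carry consumer identity and generation fields that are omitted here:

inventories = {
    'compute-rp-uuid': {
        'inventories': {
            'VCPU': {'total': 8},
            'MEMORY_MB': {'total': 1024},
            'CUSTOM_FOO': {'total': 10},  # the newly added resource class
        },
        # Must match the provider's current generation in placement, or the
        # reshape is rejected with a conflict.
        'resource_provider_generation': 2,
    },
}

allocations = {
    'instance-uuid': {
        'allocations': {
            'compute-rp-uuid': {
                'resources': {'VCPU': 1, 'MEMORY_MB': 512, 'CUSTOM_FOO': 1},
            },
        },
    },
}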
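The "stuff everything back onto the compute node" step in test_update_from_provider_tree_reshape is a per-resource-class sum across providers. A standalone, runnable illustration of the same accumulation, with invented inventories:

from collections import defaultdict

provider_inventories = {
    'cn': {'VCPU': {'total': 8}, 'MEMORY_MB': {'total': 1024}},
    'numa1': {'VCPU': {'total': 4}},
    'numa2': {'VCPU': {'total': 4}},
}

cum_inv = defaultdict(lambda: {'total': 0})
for inv in provider_inventories.values():
    for rc, fields in inv.items():
        cum_inv[rc]['total'] += fields['total']

assert dict(cum_inv) == {'VCPU': {'total': 16}, 'MEMORY_MB': {'total': 1024}}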
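The error-path check for 'placement.inventory.inuse' captures the exception with ex = self.assertRaises(...) and then inspects its message. A stdlib-only equivalent of that capture-and-inspect pattern (stdlib unittest's callable form of assertRaises returns None, so the context-manager form is used instead); the function and message are invented:

import unittest


def do_reshape():
    # Stand-in for a reshape attempt that fails because inventory still has
    # allocations against it.
    raise RuntimeError('placement.inventory.inuse')


class ReshapeConflictTest(unittest.TestCase):
    def test_error_message_is_inspectable(self):
        with self.assertRaises(RuntimeError) as cm:
            do_reshape()
        self.assertIn('placement.inventory.inuse', str(cm.exception))


if __name__ == '__main__':
    unittest.main()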
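Finally, test_update_from_provider_tree_reshape keeps copy.deepcopy() snapshots (orig_exp_ptree, orig_exp_allocs) before mutating anything so the closing round-trip can be compared against the starting state. A runnable illustration of why a deep copy, rather than assignment or dict.copy(), is needed for these nested structures; the data is invented:

import copy

allocs = {'inst1': {'allocations': {'rp1': {'resources': {'VCPU': 1}}}}}
snapshot = copy.deepcopy(allocs)

# Mutate a nested value, as the reshape steps above do.
allocs['inst1']['allocations']['rp1']['resources']['CUSTOM_FOO'] = 1

assert 'CUSTOM_FOO' not in snapshot['inst1']['allocations']['rp1']['resources']
assert allocs != snapshot  # the snapshot still reflects the original state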