From 2bd7df84dcf8a45ce92b82f2360a1b39df522297 Mon Sep 17 00:00:00 2001 From: Sylvain Bauza Date: Mon, 7 Aug 2017 13:03:37 +0200 Subject: [PATCH] Fix migrate single instance when it was created concurrently When a multiple-instance creation is requested by the user, we create a single RequestSpec per instance (each of them having the corresponding instance UUID) but we keep track of how many concurrent instances were created at once by updating a field named 'num_instances' and we persist it. Unfortunately, due to Ifc5cf482209e4f6f4e3e39b24389bd3563d86444 we now look up that field in the scheduler for knowing how many allocations to do. Since a move operation will only pass a single instance UUID to migrate to the scheduler, there is a discrepancy that can lead to an ugly IndexError. We now default that loop to the number of instance UUIDs passed over RPC, and fall back to the num_instances value persisted in the RequestSpec record only when no UUIDs are provided. Change-Id: If7da79356174be57481ef246618221e3b2ff8200 Closes-Bug: #1708961 --- nova/scheduler/filter_scheduler.py | 11 ++++++++--- .../unit/scheduler/test_filter_scheduler.py | 16 +++++++++++++--- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py index dfca443b6f..b2916d3176 100644 --- a/nova/scheduler/filter_scheduler.py +++ b/nova/scheduler/filter_scheduler.py @@ -117,8 +117,7 @@ class FilterScheduler(driver.Scheduler): :param context: The RequestContext object :param spec_obj: The RequestSpec object - :param instance_uuids: List of UUIDs, one for each value of the spec - object's num_instances attribute + :param instance_uuids: List of instance UUIDs to place or move. 
:param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource provider UUID, of the allocation requests that may be used to claim resources against @@ -159,7 +158,13 @@ class FilterScheduler(driver.Scheduler): claimed_instance_uuids = [] selected_hosts = [] - num_instances = spec_obj.num_instances + + # NOTE(sbauza): The RequestSpec.num_instances field contains the number + # of instances created when the RequestSpec was used to first boot some + # instances. This is incorrect when doing a move or resize operation, + # so prefer the length of instance_uuids unless it is None or empty. + num_instances = (len(instance_uuids) if instance_uuids + else spec_obj.num_instances) for num in range(num_instances): hosts = self._get_sorted_hosts(spec_obj, hosts, num) if not hosts: diff --git a/nova/tests/unit/scheduler/test_filter_scheduler.py b/nova/tests/unit/scheduler/test_filter_scheduler.py index 1d40cc6315..681dcf71c3 100644 --- a/nova/tests/unit/scheduler/test_filter_scheduler.py +++ b/nova/tests/unit/scheduler/test_filter_scheduler.py @@ -147,10 +147,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): '_get_all_host_states') @mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.' 
'_get_sorted_hosts') - def test_schedule_successful_claim(self, mock_get_hosts, - mock_get_all_states, mock_claim): + def _test_schedule_successful_claim(self, mock_get_hosts, + mock_get_all_states, mock_claim, num_instances=1): spec_obj = objects.RequestSpec( - num_instances=1, + num_instances=num_instances, flavor=objects.Flavor(memory_mb=512, root_gb=512, ephemeral_gb=0, @@ -188,6 +188,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): # Ensure that we have consumed the resources on the chosen host states host_state.consume_from_request.assert_called_once_with(spec_obj) + def test_schedule_successful_claim(self): + self._test_schedule_successful_claim() + + def test_schedule_old_reqspec_and_move_operation(self): + """This test is for verifying that in case of a move operation with an + original RequestSpec created for 3 concurrent instances, we only verify + the instance that is moved. + """ + self._test_schedule_successful_claim(num_instances=3) + @mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.' '_cleanup_allocations') @mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'