From da7ffe72ef43c8b6aa31cccef84df05394322abb Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Fri, 21 Jun 2013 11:44:12 +0000 Subject: [PATCH] Initial scheduler support for instance_groups Scheduler support for blueprint instance-group-api-extension The scheduling code now uses the instance group object. This patch set adds support for 'anti-affinity' and 'affinity' policies. Co-authored-by: Russell Bryant Change-Id: I539424192727cf2a768d6b24ba9cf36dc56c9304 --- nova/scheduler/driver.py | 10 -- nova/scheduler/filter_scheduler.py | 44 ++++---- nova/scheduler/filters/affinity_filter.py | 10 ++ nova/tests/scheduler/test_filter_scheduler.py | 100 +++++++----------- nova/tests/scheduler/test_host_filters.py | 22 +++- nova/tests/scheduler/test_scheduler.py | 22 ++++ 6 files changed, 111 insertions(+), 97 deletions(-) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index ade4ef5cf8..4f8a78e625 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -116,16 +116,6 @@ class Scheduler(object): for service in services if self.servicegroup_api.service_is_up(service)] - def group_hosts(self, context, group): - """Return the list of hosts that have VM's from the group.""" - - # The system_metadata 'group' will be filtered - members = db.instance_get_all_by_filters(context, - {'deleted': False, 'system_metadata': {'group': group}}) - return [member['host'] - for member in members - if member.get('host') is not None] - def schedule_run_instance(self, context, request_spec, admin_password, injected_files, requested_networks, is_first_time, diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py index 4441701099..62da2e6c02 100644 --- a/nova/scheduler/filter_scheduler.py +++ b/nova/scheduler/filter_scheduler.py @@ -25,6 +25,7 @@ from oslo.config import cfg from nova.compute import rpcapi as compute_rpcapi from nova import exception +from nova.objects import instance_group as instance_group_obj from 
nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.pci import pci_request @@ -157,17 +158,9 @@ class FilterScheduler(driver.Scheduler): # Update the metadata if necessary scheduler_hints = filter_properties.get('scheduler_hints') or {} - group = scheduler_hints.get('group', None) - values = None - if group: - values = request_spec['instance_properties']['system_metadata'] - values.update({'group': group}) - values = {'system_metadata': values} - try: updated_instance = driver.instance_update_db(context, - instance_uuid, extra_values=values) - + instance_uuid) except exception.InstanceNotFound: LOG.warning(_("Instance disappeared during scheduling"), context=context, instance_uuid=instance_uuid) @@ -265,6 +258,24 @@ class FilterScheduler(driver.Scheduler): 'instance_uuid': instance_uuid}) raise exception.NoValidHost(reason=msg) + @staticmethod + def _setup_instance_group(context, filter_properties): + update_group_hosts = False + scheduler_hints = filter_properties.get('scheduler_hints') or {} + group_uuid = scheduler_hints.get('group', None) + if group_uuid: + group = instance_group_obj.InstanceGroup.get_by_uuid(context, + group_uuid) + policies = set(('anti-affinity', 'affinity')) + if any((policy in policies) for policy in group.policies): + update_group_hosts = True + filter_properties.setdefault('group_hosts', set()) + user_hosts = filter_properties['group_hosts'] + group_hosts = set(group.get_hosts(context)) + filter_properties['group_hosts'] = user_hosts | group_hosts + filter_properties['group_policies'] = group.policies + return update_group_hosts + def _schedule(self, context, request_spec, filter_properties, instance_uuids=None): """Returns a list of hosts that meet the required specs, @@ -274,17 +285,8 @@ class FilterScheduler(driver.Scheduler): instance_properties = request_spec['instance_properties'] instance_type = request_spec.get("instance_type", None) - # Get the group - update_group_hosts = False 
- scheduler_hints = filter_properties.get('scheduler_hints') or {} - group = scheduler_hints.get('group', None) - if group: - group_hosts = self.group_hosts(elevated, group) - update_group_hosts = True - if 'group_hosts' not in filter_properties: - filter_properties.update({'group_hosts': []}) - configured_hosts = filter_properties['group_hosts'] - filter_properties['group_hosts'] = configured_hosts + group_hosts + update_group_hosts = self._setup_instance_group(context, + filter_properties) config_options = self._get_configuration_options() @@ -348,7 +350,7 @@ class FilterScheduler(driver.Scheduler): # will change for the next instance. chosen_host.obj.consume_from_instance(instance_properties) if update_group_hosts is True: - filter_properties['group_hosts'].append(chosen_host.obj.host) + filter_properties['group_hosts'].add(chosen_host.obj.host) return selected_hosts def _get_all_host_states(self, context): diff --git a/nova/scheduler/filters/affinity_filter.py b/nova/scheduler/filters/affinity_filter.py index 9e72088a10..7135463e3e 100644 --- a/nova/scheduler/filters/affinity_filter.py +++ b/nova/scheduler/filters/affinity_filter.py @@ -105,6 +105,11 @@ class GroupAntiAffinityFilter(AffinityFilter): """ def host_passes(self, host_state, filter_properties): + # Only invoke the filter if 'anti-affinity' is configured + policies = filter_properties.get('group_policies', []) + if 'anti-affinity' not in policies: + return True + group_hosts = filter_properties.get('group_hosts') or [] LOG.debug(_("Group anti affinity: check if %(host)s not " "in %(configured)s"), {'host': host_state.host, @@ -121,6 +126,11 @@ class GroupAffinityFilter(AffinityFilter): """ def host_passes(self, host_state, filter_properties): + # Only invoke the filter if 'affinity' is configured + policies = filter_properties.get('group_policies', []) + if 'affinity' not in policies: + return True + group_hosts = filter_properties.get('group_hosts', []) LOG.debug(_("Group affinity: check if %(host)s 
in " "%(configured)s"), {'host': host_state.host, diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py index 3e9b45e6a6..c88d43933d 100644 --- a/nova/tests/scheduler/test_filter_scheduler.py +++ b/nova/tests/scheduler/test_filter_scheduler.py @@ -16,21 +16,26 @@ Tests For Filter Scheduler. """ +import contextlib +import mock +import uuid + import mox -from nova.compute import rpcapi as compute_rpcapi from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import api as conductor_api from nova import context from nova import db from nova import exception +from nova.objects import instance_group as instance_group_obj from nova.pci import pci_request from nova.scheduler import driver from nova.scheduler import filter_scheduler from nova.scheduler import host_manager from nova.scheduler import utils as scheduler_utils from nova.scheduler import weights +from nova.tests import fake_instance from nova.tests.scheduler import fakes from nova.tests.scheduler import test_scheduler @@ -49,6 +54,15 @@ def fake_get_group_filtered_hosts(hosts, filter_properties, index): return list(hosts) +def fake_get_group_filtered_affinity_hosts(hosts, filter_properties, index): + group_hosts = filter_properties.get('group_hosts') or [] + if group_hosts: + affinity_host = hosts.pop(0) + return [affinity_host] + else: + return list(hosts) + + class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): """Test case for Filter Scheduler.""" @@ -362,71 +376,35 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): self.assertEqual({'vcpus': 5}, host_state.limits) - def test_basic_schedule_run_instances_anti_affinity(self): - filter_properties = {'scheduler_hints': - {'group': 'cats'}} - # Request spec 1 - instance_opts1 = {'project_id': 1, 'os_type': 'Linux', - 'memory_mb': 512, 'root_gb': 512, - 'ephemeral_gb': 0, 'vcpus': 1, - 'system_metadata': {'system': 'metadata'}} - 
request_spec1 = {'instance_uuids': ['fake-uuid1-1', 'fake-uuid1-2'], - 'instance_properties': instance_opts1, - 'instance_type': {'memory_mb': 512, 'root_gb': 512, - 'ephemeral_gb': 0, 'vcpus': 1}} - self.next_weight = 1.0 - - def _fake_weigh_objects(_self, functions, hosts, options): - self.next_weight += 2.0 - host_state = hosts[0] - return [weights.WeighedHost(host_state, self.next_weight)] - + def test_group_details_in_filter_properties(self): sched = fakes.FakeFilterScheduler() - fake_context = context.RequestContext('user', 'project', - is_admin=True) + instance = fake_instance.fake_instance_obj(self.context, + params={'host': 'hostA'}) - self.stubs.Set(sched.host_manager, 'get_filtered_hosts', - fake_get_group_filtered_hosts) - self.stubs.Set(weights.HostWeightHandler, - 'get_weighed_objects', _fake_weigh_objects) - fakes.mox_host_manager_db_calls(self.mox, fake_context) + group = instance_group_obj.InstanceGroup() + group.uuid = str(uuid.uuid4()) + group.members = [instance.uuid] + group.policies = ['anti-affinity'] - self.mox.StubOutWithMock(driver, 'instance_update_db') - self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance') - self.mox.StubOutWithMock(sched, 'group_hosts') + filter_properties = { + 'scheduler_hints': { + 'group': group.uuid, + }, + } - instance1_1 = {'uuid': 'fake-uuid1-1'} - instance1_2 = {'uuid': 'fake-uuid1-2'} - - sched.group_hosts(mox.IgnoreArg(), 'cats').AndReturn([]) - - def inc_launch_index1(*args, **kwargs): - request_spec1['instance_properties']['launch_index'] = ( - request_spec1['instance_properties']['launch_index'] + 1) - - expected_metadata = {'system_metadata': - {'system': 'metadata', 'group': 'cats'}} - driver.instance_update_db(fake_context, instance1_1['uuid'], - extra_values=expected_metadata).WithSideEffects( - inc_launch_index1).AndReturn(instance1_1) - compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host3', - instance=instance1_1, requested_networks=None, - injected_files=None, 
admin_password=None, is_first_time=None, - request_spec=request_spec1, filter_properties=mox.IgnoreArg(), - node='node3', legacy_bdm_in_spec=False) - - driver.instance_update_db(fake_context, instance1_2['uuid'], - extra_values=expected_metadata).WithSideEffects( - inc_launch_index1).AndReturn(instance1_2) - compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host4', - instance=instance1_2, requested_networks=None, - injected_files=None, admin_password=None, is_first_time=None, - request_spec=request_spec1, filter_properties=mox.IgnoreArg(), - node='node4', legacy_bdm_in_spec=False) - self.mox.ReplayAll() - sched.schedule_run_instance(fake_context, request_spec1, - None, None, None, None, filter_properties, False) + with contextlib.nested( + mock.patch.object(instance_group_obj.InstanceGroup, 'get_by_uuid', + return_value=group), + mock.patch.object(instance_group_obj.InstanceGroup, 'get_hosts', + return_value=['hostA']), + ) as (get_group, get_hosts): + update_group_hosts = sched._setup_instance_group(self.context, + filter_properties) + self.assertTrue(update_group_hosts) + self.assertEqual(set(['hostA']), filter_properties['group_hosts']) + self.assertEqual(['anti-affinity'], + filter_properties['group_policies']) def test_schedule_host_pool(self): """Make sure the scheduler_host_subset_size property works properly.""" diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index 3d34004c0b..59e68b75dc 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -1558,27 +1558,39 @@ class HostFiltersTestCase(test.NoDBTestCase): def test_group_anti_affinity_filter_passes(self): filt_cls = self.class_map['GroupAntiAffinityFilter']() host = fakes.FakeHostState('host1', 'node1', {}) - filter_properties = {'group_hosts': []} + filter_properties = {} self.assertTrue(filt_cls.host_passes(host, filter_properties)) - filter_properties = {'group_hosts': ['host2']} + 
filter_properties = {'group_policies': ['affinity']} + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + filter_properties = {'group_policies': ['anti-affinity']} + filter_properties['group_hosts'] = [] + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + filter_properties['group_hosts'] = ['host2'] self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_group_anti_affinity_filter_fails(self): filt_cls = self.class_map['GroupAntiAffinityFilter']() host = fakes.FakeHostState('host1', 'node1', {}) - filter_properties = {'group_hosts': ['host1']} + filter_properties = {'group_policies': ['anti-affinity'], + 'group_hosts': ['host1']} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_group_affinity_filter_passes(self): filt_cls = self.class_map['GroupAffinityFilter']() host = fakes.FakeHostState('host1', 'node1', {}) - filter_properties = {'group_hosts': ['host1']} + filter_properties = {} + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + filter_properties = {'group_policies': ['anti-affinity']} + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + filter_properties = {'group_policies': ['affinity'], + 'group_hosts': ['host1']} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_group_affinity_filter_fails(self): filt_cls = self.class_map['GroupAffinityFilter']() host = fakes.FakeHostState('host1', 'node1', {}) - filter_properties = {'group_hosts': ['host2']} + filter_properties = {'group_policies': ['affinity'], + 'group_hosts': ['host2']} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_aggregate_multi_tenancy_isolation_with_meta_passes(self): diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index fa1a931c34..a72d6a80b7 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -532,3 +532,25 @@ class 
SchedulerDriverBaseTestCase(SchedulerTestCase): def test_unimplemented_select_destinations(self): self.assertRaises(NotImplementedError, self.driver.select_destinations, self.context, {}, {}) + + +class SchedulerInstanceGroupData(test.TestCase): + + driver_cls = driver.Scheduler + + def setUp(self): + super(SchedulerInstanceGroupData, self).setUp() + self.user_id = 'fake_user' + self.project_id = 'fake_project' + self.context = context.RequestContext(self.user_id, self.project_id) + self.driver = self.driver_cls() + + def _get_default_values(self): + return {'name': 'fake_name', + 'user_id': self.user_id, + 'project_id': self.project_id} + + def _create_instance_group(self, context, values, policies=None, + metadata=None, members=None): + return db.instance_group_create(context, values, policies=policies, + metadata=metadata, members=members)