Initial scheduler support for instance_groups
Scheduler support for blueprint instance-group-api-extension

The scheduling code now uses the instance group object. This patch set
adds support for the 'anti-affinity' and 'affinity' policies.

Co-authored-by: Russell Bryant <rbryant@redhat.com>
Change-Id: I539424192727cf2a768d6b24ba9cf36dc56c9304
commit da7ffe72ef
parent 633f0df009
committed by Russell Bryant
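Everything below keys off a single input: the 'group' scheduler hint carried
with the boot request. A minimal sketch of the shape the scheduler acts on
(the UUID is hypothetical; the dict layout follows the scheduler_hints
handling in the hunks below):

    # By the time _schedule() runs, the hint supplied with the boot request
    # has been copied into filter_properties['scheduler_hints'].
    filter_properties = {
        'scheduler_hints': {
            # UUID of a pre-created instance group whose 'policies' list
            # contains 'affinity' and/or 'anti-affinity'.
            'group': '8f62b6c4-hypothetical-uuid',
        },
    }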
@@ -116,16 +116,6 @@ class Scheduler(object):
                 for service in services
                 if self.servicegroup_api.service_is_up(service)]
 
-    def group_hosts(self, context, group):
-        """Return the list of hosts that have VM's from the group."""
-
-        # The system_metadata 'group' will be filtered
-        members = db.instance_get_all_by_filters(context,
-                {'deleted': False, 'system_metadata': {'group': group}})
-        return [member['host']
-                for member in members
-                if member.get('host') is not None]
-
     def schedule_run_instance(self, context, request_spec,
                               admin_password, injected_files,
                               requested_networks, is_first_time,
@@ -25,6 +25,7 @@ from oslo.config import cfg
 
 from nova.compute import rpcapi as compute_rpcapi
 from nova import exception
+from nova.objects import instance_group as instance_group_obj
 from nova.openstack.common.gettextutils import _
 from nova.openstack.common import log as logging
 from nova.pci import pci_request
@@ -157,17 +158,9 @@ class FilterScheduler(driver.Scheduler):
 
-        # Update the metadata if necessary
-        scheduler_hints = filter_properties.get('scheduler_hints') or {}
-        group = scheduler_hints.get('group', None)
-        values = None
-        if group:
-            values = request_spec['instance_properties']['system_metadata']
-            values.update({'group': group})
-            values = {'system_metadata': values}
 
         try:
             updated_instance = driver.instance_update_db(context,
-                    instance_uuid, extra_values=values)
+                    instance_uuid)
         except exception.InstanceNotFound:
             LOG.warning(_("Instance disappeared during scheduling"),
                         context=context, instance_uuid=instance_uuid)
 
@@ -265,6 +258,24 @@ class FilterScheduler(driver.Scheduler):
                    'instance_uuid': instance_uuid})
             raise exception.NoValidHost(reason=msg)
 
+    @staticmethod
+    def _setup_instance_group(context, filter_properties):
+        update_group_hosts = False
+        scheduler_hints = filter_properties.get('scheduler_hints') or {}
+        group_uuid = scheduler_hints.get('group', None)
+        if group_uuid:
+            group = instance_group_obj.InstanceGroup.get_by_uuid(context,
+                    group_uuid)
+            policies = set(('anti-affinity', 'affinity'))
+            if any((policy in policies) for policy in group.policies):
+                update_group_hosts = True
+                filter_properties.setdefault('group_hosts', set())
+                user_hosts = filter_properties['group_hosts']
+                group_hosts = set(group.get_hosts(context))
+                filter_properties['group_hosts'] = user_hosts | group_hosts
+                filter_properties['group_policies'] = group.policies
+        return update_group_hosts
+
     def _schedule(self, context, request_spec, filter_properties,
                   instance_uuids=None):
         """Returns a list of hosts that meet the required specs,
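A hedged walk-through of the helper added above, assuming a group with
policies=['anti-affinity'] and one member already placed on 'hostA' (the
context and group objects here are stand-ins):

    filter_properties = {'scheduler_hints': {'group': group_uuid}}
    # _setup_instance_group is a staticmethod, so it can be exercised
    # directly on the class.
    updated = filter_scheduler.FilterScheduler._setup_instance_group(
        ctxt, filter_properties)
    # updated is True, and filter_properties now also carries:
    #   'group_hosts': set(['hostA'])         # hosts holding group members
    #   'group_policies': ['anti-affinity']   # consulted by the filters

Caller-supplied group_hosts, if any, are merged in via the set union rather
than replaced.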
@@ -274,17 +285,8 @@ class FilterScheduler(driver.Scheduler):
         instance_properties = request_spec['instance_properties']
         instance_type = request_spec.get("instance_type", None)
 
-        # Get the group
-        update_group_hosts = False
-        scheduler_hints = filter_properties.get('scheduler_hints') or {}
-        group = scheduler_hints.get('group', None)
-        if group:
-            group_hosts = self.group_hosts(elevated, group)
-            update_group_hosts = True
-            if 'group_hosts' not in filter_properties:
-                filter_properties.update({'group_hosts': []})
-            configured_hosts = filter_properties['group_hosts']
-            filter_properties['group_hosts'] = configured_hosts + group_hosts
+        update_group_hosts = self._setup_instance_group(context,
+                                                        filter_properties)
 
         config_options = self._get_configuration_options()
 
@@ -348,7 +350,7 @@ class FilterScheduler(driver.Scheduler):
             # will change for the next instance.
             chosen_host.obj.consume_from_instance(instance_properties)
             if update_group_hosts is True:
-                filter_properties['group_hosts'].append(chosen_host.obj.host)
+                filter_properties['group_hosts'].add(chosen_host.obj.host)
         return selected_hosts
 
     def _get_all_host_states(self, context):
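The append-to-add change follows from _setup_instance_group() seeding
'group_hosts' as a set rather than a list: as each instance in a
multi-instance request is placed, its host is folded back in, and the set
keeps the collection duplicate-free. Illustrative only:

    group_hosts = set(['hostA'])  # seeded from the group's current members
    group_hosts.add('hostA')      # re-adding an already-used host is a no-op
    group_hosts.add('hostB')      # a newly chosen host is now visible to the
                                  # filters for the remaining instances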
@@ -105,6 +105,11 @@ class GroupAntiAffinityFilter(AffinityFilter):
     """
 
     def host_passes(self, host_state, filter_properties):
+        # Only invoke the filter if 'anti-affinity' is configured
+        policies = filter_properties.get('group_policies', [])
+        if 'anti-affinity' not in policies:
+            return True
+
         group_hosts = filter_properties.get('group_hosts') or []
         LOG.debug(_("Group anti affinity: check if %(host)s not "
                     "in %(configured)s"), {'host': host_state.host,
@@ -121,6 +126,11 @@ class GroupAffinityFilter(AffinityFilter):
     """
 
     def host_passes(self, host_state, filter_properties):
+        # Only invoke the filter if 'affinity' is configured
+        policies = filter_properties.get('group_policies', [])
+        if 'affinity' not in policies:
+            return True
+
         group_hosts = filter_properties.get('group_hosts', [])
         LOG.debug(_("Group affinity: check if %(host)s in "
                     "%(configured)s"), {'host': host_state.host,
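With the guards above, both filters short-circuit unless their policy was
actually requested, so deployments can leave them in the filter list
permanently. A condensed restatement of the added check (only the policy
string differs between the two filters):

    policies = filter_properties.get('group_policies', [])
    if 'anti-affinity' not in policies:  # 'affinity' in GroupAffinityFilter
        return True  # policy not requested, so this filter never blocks
    # ...otherwise fall through to the existing group_hosts membership check.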
@@ -16,21 +16,26 @@
 Tests For Filter Scheduler.
 """
 
+import contextlib
+import mock
+import uuid
+
 import mox
 
 from nova.compute import rpcapi as compute_rpcapi
 from nova.compute import utils as compute_utils
 from nova.compute import vm_states
 from nova.conductor import api as conductor_api
 from nova import context
 from nova import db
 from nova import exception
+from nova.objects import instance_group as instance_group_obj
 from nova.pci import pci_request
 from nova.scheduler import driver
 from nova.scheduler import filter_scheduler
 from nova.scheduler import host_manager
 from nova.scheduler import utils as scheduler_utils
 from nova.scheduler import weights
 from nova.tests import fake_instance
 from nova.tests.scheduler import fakes
 from nova.tests.scheduler import test_scheduler
@@ -49,6 +54,15 @@ def fake_get_group_filtered_hosts(hosts, filter_properties, index):
     return list(hosts)
 
 
+def fake_get_group_filtered_affinity_hosts(hosts, filter_properties, index):
+    group_hosts = filter_properties.get('group_hosts') or []
+    if group_hosts:
+        affinity_host = hosts.pop(0)
+        return [affinity_host]
+    else:
+        return list(hosts)
+
+
 class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
     """Test case for Filter Scheduler."""
 
@@ -362,71 +376,35 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
 
         self.assertEqual({'vcpus': 5}, host_state.limits)
 
-    def test_basic_schedule_run_instances_anti_affinity(self):
-        filter_properties = {'scheduler_hints':
-                                {'group': 'cats'}}
-        # Request spec 1
-        instance_opts1 = {'project_id': 1, 'os_type': 'Linux',
-                          'memory_mb': 512, 'root_gb': 512,
-                          'ephemeral_gb': 0, 'vcpus': 1,
-                          'system_metadata': {'system': 'metadata'}}
-        request_spec1 = {'instance_uuids': ['fake-uuid1-1', 'fake-uuid1-2'],
-                         'instance_properties': instance_opts1,
-                         'instance_type': {'memory_mb': 512, 'root_gb': 512,
-                                           'ephemeral_gb': 0, 'vcpus': 1}}
-        self.next_weight = 1.0
-
-        def _fake_weigh_objects(_self, functions, hosts, options):
-            self.next_weight += 2.0
-            host_state = hosts[0]
-            return [weights.WeighedHost(host_state, self.next_weight)]
-
+    def test_group_details_in_filter_properties(self):
         sched = fakes.FakeFilterScheduler()
-        fake_context = context.RequestContext('user', 'project',
-                                              is_admin=True)
+
+        instance = fake_instance.fake_instance_obj(self.context,
+                params={'host': 'hostA'})
 
-        self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
-                       fake_get_group_filtered_hosts)
-        self.stubs.Set(weights.HostWeightHandler,
-                       'get_weighed_objects', _fake_weigh_objects)
-        fakes.mox_host_manager_db_calls(self.mox, fake_context)
+        group = instance_group_obj.InstanceGroup()
+        group.uuid = str(uuid.uuid4())
+        group.members = [instance.uuid]
+        group.policies = ['anti-affinity']
 
-        self.mox.StubOutWithMock(driver, 'instance_update_db')
-        self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
-        self.mox.StubOutWithMock(sched, 'group_hosts')
+        filter_properties = {
+            'scheduler_hints': {
+                'group': group.uuid,
+            },
+        }
 
-        instance1_1 = {'uuid': 'fake-uuid1-1'}
-        instance1_2 = {'uuid': 'fake-uuid1-2'}
-
-        sched.group_hosts(mox.IgnoreArg(), 'cats').AndReturn([])
-
-        def inc_launch_index1(*args, **kwargs):
-            request_spec1['instance_properties']['launch_index'] = (
-                request_spec1['instance_properties']['launch_index'] + 1)
-
-        expected_metadata = {'system_metadata':
-                             {'system': 'metadata', 'group': 'cats'}}
-        driver.instance_update_db(fake_context, instance1_1['uuid'],
-                extra_values=expected_metadata).WithSideEffects(
-                inc_launch_index1).AndReturn(instance1_1)
-        compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host3',
-                instance=instance1_1, requested_networks=None,
-                injected_files=None, admin_password=None, is_first_time=None,
-                request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
-                node='node3', legacy_bdm_in_spec=False)
-
-        driver.instance_update_db(fake_context, instance1_2['uuid'],
-                extra_values=expected_metadata).WithSideEffects(
-                inc_launch_index1).AndReturn(instance1_2)
-        compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host4',
-                instance=instance1_2, requested_networks=None,
-                injected_files=None, admin_password=None, is_first_time=None,
-                request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
-                node='node4', legacy_bdm_in_spec=False)
-        self.mox.ReplayAll()
-
-        sched.schedule_run_instance(fake_context, request_spec1,
-                None, None, None, None, filter_properties, False)
+        with contextlib.nested(
+            mock.patch.object(instance_group_obj.InstanceGroup, 'get_by_uuid',
+                              return_value=group),
+            mock.patch.object(instance_group_obj.InstanceGroup, 'get_hosts',
+                              return_value=['hostA']),
+        ) as (get_group, get_hosts):
+            update_group_hosts = sched._setup_instance_group(self.context,
+                                                             filter_properties)
+        self.assertTrue(update_group_hosts)
+        self.assertEqual(set(['hostA']), filter_properties['group_hosts'])
+        self.assertEqual(['anti-affinity'],
+                         filter_properties['group_policies'])
 
     def test_schedule_host_pool(self):
         """Make sure the scheduler_host_subset_size property works properly."""
@@ -1558,27 +1558,39 @@ class HostFiltersTestCase(test.NoDBTestCase):
     def test_group_anti_affinity_filter_passes(self):
         filt_cls = self.class_map['GroupAntiAffinityFilter']()
         host = fakes.FakeHostState('host1', 'node1', {})
-        filter_properties = {'group_hosts': []}
+        filter_properties = {}
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
-        filter_properties = {'group_hosts': ['host2']}
+        filter_properties = {'group_policies': ['affinity']}
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
+        filter_properties = {'group_policies': ['anti-affinity']}
+        filter_properties['group_hosts'] = []
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+        filter_properties['group_hosts'] = ['host2']
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
 
     def test_group_anti_affinity_filter_fails(self):
         filt_cls = self.class_map['GroupAntiAffinityFilter']()
         host = fakes.FakeHostState('host1', 'node1', {})
-        filter_properties = {'group_hosts': ['host1']}
+        filter_properties = {'group_policies': ['anti-affinity'],
+                             'group_hosts': ['host1']}
         self.assertFalse(filt_cls.host_passes(host, filter_properties))
 
     def test_group_affinity_filter_passes(self):
         filt_cls = self.class_map['GroupAffinityFilter']()
         host = fakes.FakeHostState('host1', 'node1', {})
-        filter_properties = {'group_hosts': ['host1']}
+        filter_properties = {}
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
+        filter_properties = {'group_policies': ['anti-affinity']}
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+        filter_properties = {'group_policies': ['affinity'],
+                             'group_hosts': ['host1']}
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
 
     def test_group_affinity_filter_fails(self):
         filt_cls = self.class_map['GroupAffinityFilter']()
         host = fakes.FakeHostState('host1', 'node1', {})
-        filter_properties = {'group_hosts': ['host2']}
+        filter_properties = {'group_policies': ['affinity'],
+                             'group_hosts': ['host2']}
         self.assertFalse(filt_cls.host_passes(host, filter_properties))
 
     def test_aggregate_multi_tenancy_isolation_with_meta_passes(self):
@@ -532,3 +532,25 @@ class SchedulerDriverBaseTestCase(SchedulerTestCase):
     def test_unimplemented_select_destinations(self):
         self.assertRaises(NotImplementedError,
                 self.driver.select_destinations, self.context, {}, {})
+
+
+class SchedulerInstanceGroupData(test.TestCase):
+
+    driver_cls = driver.Scheduler
+
+    def setUp(self):
+        super(SchedulerInstanceGroupData, self).setUp()
+        self.user_id = 'fake_user'
+        self.project_id = 'fake_project'
+        self.context = context.RequestContext(self.user_id, self.project_id)
+        self.driver = self.driver_cls()
+
+    def _get_default_values(self):
+        return {'name': 'fake_name',
+                'user_id': self.user_id,
+                'project_id': self.project_id}
+
+    def _create_instance_group(self, context, values, policies=None,
+                               metadata=None, members=None):
+        return db.instance_group_create(context, values, policies=policies,
+                                        metadata=metadata, members=members)