Remove cells code

Thankfully the bulk of this is neatly organized in a single directory
and can be removed, now that most of the references to it have been
removed. The only complicated area is the tests, though effort has been
taken to minimise the diff there wherever possible.

Part of blueprint remove-cells-v1

Change-Id: Ib0e0b708c46e4330e51f8f8fdfbb02d45aaf0f44
Signed-off-by: Stephen Finucane <sfinucan@redhat.com>
This commit is contained in:
Stephen Finucane
2019-04-04 15:39:08 +01:00
parent 59784cfa6c
commit 817dcc89a9
45 changed files with 199 additions and 12018 deletions
-19
View File
@@ -1,19 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells
"""
TOPIC = 'cells'
-41
View File
@@ -1,41 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base Cells Communication Driver
"""
class BaseCellsDriver(object):
    """The base class for cells communication.
    One instance of this class will be created for every neighbor cell
    that we find in the DB and it will be associated with the cell in
    its CellState.
    One instance is also created by the cells manager for setting up
    the consumers.

    Subclasses must implement all three methods below; each default
    implementation simply raises NotImplementedError.
    """
    def start_servers(self, msg_runner):
        """Start any messaging servers the driver may need.

        :param msg_runner: MessageRunner whose message types the
            started servers should consume.
        """
        raise NotImplementedError()
    def stop_servers(self):
        """Stop accepting messages on servers started by start_servers()."""
        raise NotImplementedError()
    def send_message_to_cell(self, cell_state, message):
        """Send a message to a cell.

        :param cell_state: CellState identifying the destination cell.
        :param message: driver-specific message to deliver.
        """
        raise NotImplementedError()
-56
View File
@@ -1,56 +0,0 @@
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell scheduler filters
"""
from nova import filters
class BaseCellFilter(filters.BaseFilter):
    """Common base class for cell scheduler filters."""
    def authorized(self, ctxt):
        """Check policy to decide whether *ctxt* may use this filter.

        The policy action consulted is "cells_scheduler_filter:<name>",
        where <name> is this filter's class name.
        """
        action = 'cells_scheduler_filter:%s' % type(self).__name__
        return ctxt.can(action, fatal=False)
    def _filter_one(self, cell, filter_properties):
        # Delegate the per-cell decision to the subclass hook.
        passes = self.cell_passes(cell, filter_properties)
        return passes
    def cell_passes(self, cell, filter_properties):
        """Subclass hook: return True if the CellState passes the filter."""
        raise NotImplementedError()
class CellFilterHandler(filters.BaseFilterHandler):
    """Filter handler specialized to discover BaseCellFilter subclasses."""
    def __init__(self):
        super(CellFilterHandler, self).__init__(BaseCellFilter)
def all_filters():
    """Return every cell filter class discovered in this directory.

    Used as the default set of available scheduler filters.
    """
    handler = CellFilterHandler()
    return handler.get_all_classes()
-62
View File
@@ -1,62 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Different cell filter.
A scheduler hint of 'different_cell' with a value of a full cell name may be
specified to route a build away from a particular cell.
"""
import six
from nova.cells import filters
from nova.cells import utils as cells_utils
class DifferentCellFilter(filters.BaseCellFilter):
    """Filter that routes a build away from particular cells.

    Driven by a 'different_cell' scheduler hint whose value is one full
    cell path (or a list of them) that the build must avoid.
    """
    def filter_all(self, cells, filter_properties):
        """Filter the full candidate cell list in one pass."""
        hints = filter_properties.get('scheduler_hints')
        if not hints:
            return cells
        avoid_routes = hints.get('different_cell')
        if not avoid_routes:
            return cells
        if isinstance(avoid_routes, six.string_types):
            # Normalize a single route into a one-element list.
            avoid_routes = [avoid_routes]
        if not self.authorized(filter_properties['context']):
            # Unauthorized contexts get the unfiltered list back.
            return cells
        routing_path = filter_properties['routing_path']
        return [cell for cell in cells
                if not self._cell_state_matches(cell, routing_path,
                                                avoid_routes)]
    def _cell_state_matches(self, cell_state, routing_path, cell_routes):
        # Our own cell is addressed by the routing path alone; remote
        # cells get their name appended after the path separator.
        route = routing_path
        if not cell_state.is_me:
            route += cells_utils.PATH_CELL_SEP + cell_state.name
        return route in cell_routes
-67
View File
@@ -1,67 +0,0 @@
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image properties filter.
Image metadata named 'hypervisor_version_requires' with a version specification
may be specified to ensure the build goes to a cell which has hypervisors of
the required version.
If either the version requirement on the image or the hypervisor capability
of the cell is not present, this filter returns without filtering out the
cells.
"""
from distutils import versionpredicate
from nova.cells import filters
class ImagePropertiesFilter(filters.BaseCellFilter):
    """Filter cells on the hypervisor version required by an image.

    The image metadata key 'hypervisor_version_requires' holds a version
    predicate; cells whose 'prominent_hypervisor_version' capability
    fails the predicate are filtered out. If either the requirement or
    the capability is absent, no filtering happens for that cell/image.
    """
    def filter_all(self, cells, filter_properties):
        """Filter the full candidate cell list in one pass."""
        spec = filter_properties.get('request_spec', {})
        image_props = spec.get('image', {}).get('properties', {})
        required = image_props.get('hypervisor_version_requires')
        if required is None:
            # The image imposes no version requirement: keep every cell.
            return cells
        passing = []
        for cell in cells:
            version = cell.capabilities.get('prominent_hypervisor_version')
            if version:
                # The capability value is a collection; take its first
                # element as the representative version string.
                first = list(version)[0]
                version = str(first)
            if not version or self._matches_version(version, required):
                passing.append(cell)
        return passing
    def _matches_version(self, version, version_requires):
        """Return True if *version* satisfies *version_requires*."""
        predicate = versionpredicate.VersionPredicate(
            'prop (%s)' % version_requires)
        return predicate.satisfied_by(version)
-71
View File
@@ -1,71 +0,0 @@
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Target cell filter.
A scheduler hint of 'target_cell' with a value of a full cell name may be
specified to route a build to a particular cell. No error handling is
done as there's no way to know whether the full path is a valid.
"""
from oslo_log import log as logging
from nova.cells import filters
LOG = logging.getLogger(__name__)
class TargetCellFilter(filters.BaseCellFilter):
    """Filter that forces a build into one specific cell.

    Driven by a 'target_cell' scheduler hint whose value is the full
    cell path. The path is not validated in any way.
    """
    def filter_all(self, cells, filter_properties):
        """Filter the full candidate cell list in one pass."""
        hints = filter_properties.get('scheduler_hints')
        if not hints:
            return cells
        # A full cell name only makes sense at the top level, so the
        # hint is popped (not merely read) to keep it out of the hints
        # that child cells will later see.
        target = hints.pop('target_cell', None)
        if not target:
            return cells
        # Authorization deliberately happens after the pop: even when
        # it fails, 'target_cell' must not linger in the hints dict.
        if not self.authorized(filter_properties['context']):
            # Unauthorized contexts get the unfiltered list back.
            return cells
        LOG.info("Forcing direct route to %(cell_name)s because "
                 "of 'target_cell' scheduler hint",
                 {'cell_name': target})
        scheduler = filter_properties['scheduler']
        if target == filter_properties['routing_path']:
            # We are the target cell; schedule locally.
            return [scheduler.state_manager.get_my_state()]
        context = filter_properties['context']
        scheduler.msg_runner.build_instances(
            context, target, filter_properties['host_sched_kwargs'])
        # The implicit None return tells the caller scheduling was
        # handled here and no further filtering should occur.
-529
View File
@@ -1,529 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Service Manager
"""
import datetime
import time
from oslo_log import log as logging
import oslo_messaging
from oslo_service import periodic_task
from oslo_utils import timeutils
from six.moves import range
from nova.cells import messaging
from nova.cells import rpc_driver as cells_rpc_driver
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova.compute import rpcapi as compute_rpcapi
import nova.conf
from nova import context
from nova import exception
from nova import manager
from nova import objects
from nova.objects import instance as instance_obj
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class CellsManager(manager.Manager):
    """The nova-cells manager class. This class defines RPC
    methods that the local cell may call. This class is NOT used for
    messages coming from other cells. That communication is
    driver-specific.
    Communication to other cells happens via the nova.cells.messaging module.
    The MessageRunner from that module will handle routing the message to
    the correct cell via the communications driver. Most methods below
    create 'targeted' (where we want to route a message to a specific cell)
    or 'broadcast' (where we want a message to go to multiple cells)
    messages.
    Scheduling requests get passed to the scheduler class.
    """
    # RPC API version exposed to services in the local cell.
    target = oslo_messaging.Target(version='1.38')
    def __init__(self, *args, **kwargs):
        LOG.warning('The cells feature of Nova is considered experimental '
                    'by the OpenStack project because it receives much '
                    'less testing than the rest of Nova. This may change '
                    'in the future, but current deployers should be aware '
                    'that the use of it in production right now may be '
                    'risky. Also note that cells does not currently '
                    'support rolling upgrades, it is assumed that cells '
                    'deployments are upgraded lockstep so n-1 cells '
                    'compatibility does not work.')
        # Mostly for tests.
        cell_state_manager = kwargs.pop('cell_state_manager', None)
        super(CellsManager, self).__init__(service_name='cells',
                                           *args, **kwargs)
        if cell_state_manager is None:
            cell_state_manager = cells_state.CellStateManager
        self.state_manager = cell_state_manager()
        self.msg_runner = messaging.MessageRunner(self.state_manager)
        self.driver = cells_rpc_driver.CellsRPCDriver()
        # Iterator of instance UUIDs pending sync in _heal_instances;
        # starts exhausted and is refilled lazily per periodic run.
        self.instances_to_heal = iter([])
    def post_start_hook(self):
        """Have the driver start its servers for inter-cell communication.
        Also ask our child cells for their capacities and capabilities so
        we get them more quickly than just waiting for the next periodic
        update. Receiving the updates from the children will cause us to
        update our parents. If we don't have any children, just update
        our parents immediately.
        """
        # FIXME(comstud): There's currently no hooks when services are
        # stopping, so we have no way to stop servers cleanly.
        self.driver.start_servers(self.msg_runner)
        ctxt = context.get_admin_context()
        if self.state_manager.get_child_cells():
            self.msg_runner.ask_children_for_capabilities(ctxt)
            self.msg_runner.ask_children_for_capacities(ctxt)
        else:
            self._update_our_parents(ctxt)
    @periodic_task.periodic_task
    def _update_our_parents(self, ctxt):
        """Update our parent cells with our capabilities and capacity
        if we're at the bottom of the tree.
        """
        self.msg_runner.tell_parents_our_capabilities(ctxt)
        self.msg_runner.tell_parents_our_capacities(ctxt)
    @periodic_task.periodic_task
    def _heal_instances(self, ctxt):
        """Periodic task to send updates for a number of instances to
        parent cells.
        On every run of the periodic task, we will attempt to sync
        'CONF.cells.instance_update_num_instances' number of instances.
        When we get the list of instances, we shuffle them so that multiple
        nova-cells services aren't attempting to sync the same instances
        in lockstep.
        If CONF.cells.instance_update_at_threshold is set, only attempt
        to sync instances that have been updated recently. The CONF
        setting defines the maximum number of seconds old the updated_at
        can be. Ie, a threshold of 3600 means to only update instances
        that have modified in the last hour.
        """
        if not self.state_manager.get_parent_cells():
            # No need to sync up if we have no parents.
            return
        # Tracks whether the UUID iterator was already refreshed during
        # this run, so we refill it at most once per invocation.
        info = {'updated_list': False}
        def _next_instance():
            # Return the next instance UUID to sync, refilling the
            # exhausted iterator once; returns None when nothing is left
            # for this run.
            try:
                instance = next(self.instances_to_heal)
            except StopIteration:
                if info['updated_list']:
                    return
                threshold = CONF.cells.instance_updated_at_threshold
                updated_since = None
                if threshold > 0:
                    updated_since = timeutils.utcnow() - datetime.timedelta(
                        seconds=threshold)
                self.instances_to_heal = cells_utils.get_instances_to_sync(
                    ctxt, updated_since=updated_since, shuffle=True,
                    uuids_only=True)
                info['updated_list'] = True
                try:
                    instance = next(self.instances_to_heal)
                except StopIteration:
                    return
            return instance
        rd_context = ctxt.elevated(read_deleted='yes')
        for i in range(CONF.cells.instance_update_num_instances):
            while True:
                # Yield to other greenthreads
                time.sleep(0)
                instance_uuid = _next_instance()
                if not instance_uuid:
                    return
                try:
                    instance = objects.Instance.get_by_uuid(rd_context,
                            instance_uuid)
                except exception.InstanceNotFound:
                    continue
                self._sync_instance(ctxt, instance)
                break
    def _sync_instance(self, ctxt, instance):
        """Broadcast an instance_update or instance_destroy message up to
        parent cells.
        """
        # NOTE(review): body is a no-op in this revision -- the broadcast
        # described in the docstring is not performed; confirm upstream.
        pass
    def build_instances(self, ctxt, build_inst_kwargs):
        """Pick a cell (possibly ourselves) to build new instance(s) and
        forward the request accordingly.
        """
        # Target is ourselves first.
        filter_properties = build_inst_kwargs.get('filter_properties')
        if (filter_properties is not None and
                not isinstance(filter_properties['instance_type'],
                               objects.Flavor)):
            # NOTE(danms): Handle pre-1.30 build_instances() call. Remove me
            # when we bump the RPC API version to 2.0.
            flavor = objects.Flavor(**filter_properties['instance_type'])
            build_inst_kwargs['filter_properties'] = dict(
                filter_properties, instance_type=flavor)
        instances = build_inst_kwargs['instances']
        if not isinstance(instances[0], objects.Instance):
            # NOTE(danms): Handle pre-1.32 build_instances() call. Remove me
            # when we bump the RPC API version to 2.0
            build_inst_kwargs['instances'] = instance_obj._make_instance_list(
                ctxt, objects.InstanceList(), instances, ['system_metadata',
                'metadata'])
        our_cell = self.state_manager.get_my_state()
        self.msg_runner.build_instances(ctxt, our_cell, build_inst_kwargs)
    def get_cell_info_for_neighbors(self, _ctxt):
        """Return cell information for our neighbor cells."""
        return self.state_manager.get_cell_info_for_neighbors()
    def run_compute_api_method(self, ctxt, cell_name, method_info, call):
        """Call a compute API method in a specific cell."""
        response = self.msg_runner.run_compute_api_method(ctxt,
                                                          cell_name,
                                                          method_info,
                                                          call)
        if call:
            # Only synchronous invocations have a result; casts return
            # nothing.
            return response.value_or_raise()
    def instance_delete_everywhere(self, ctxt, instance, delete_type):
        """This is used by API cell when it didn't know what cell
        an instance was in, but the instance was requested to be
        deleted or soft_deleted. So, we'll broadcast this everywhere.
        """
        if isinstance(instance, dict):
            # Legacy callers may pass a raw DB dict; hydrate an object.
            instance = objects.Instance._from_db_object(ctxt,
                    objects.Instance(), instance)
        self.msg_runner.instance_delete_everywhere(ctxt, instance,
                                                   delete_type)
    def sync_instances(self, ctxt, project_id, updated_since, deleted):
        """Force a sync of all instances, potentially by project_id,
        and potentially since a certain date/time.
        """
        self.msg_runner.sync_instances(ctxt, project_id, updated_since,
                                       deleted)
    def service_get_all(self, ctxt, filters):
        """Return services in this cell and in all child cells."""
        responses = self.msg_runner.service_get_all(ctxt, filters)
        ret_services = []
        # 1 response per cell. Each response is a list of services.
        for response in responses:
            services = response.value_or_raise()
            for service in services:
                service = cells_utils.add_cell_to_service(
                    service, response.cell_name)
                ret_services.append(service)
        return ret_services
    @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency)
    def service_get_by_compute_host(self, ctxt, host_name):
        """Return a service entry for a compute host in a certain cell."""
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.service_get_by_compute_host(ctxt,
                                                               cell_name,
                                                               host_name)
        service = response.value_or_raise()
        service = cells_utils.add_cell_to_service(service, response.cell_name)
        return service
    def get_host_uptime(self, ctxt, host_name):
        """Return host uptime for a compute host in a certain cell
        :param host_name: fully qualified hostname. It should be in format of
         parent!child@host_id
        """
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.get_host_uptime(ctxt, cell_name,
                                                   host_name)
        return response.value_or_raise()
    def service_update(self, ctxt, host_name, binary, params_to_update):
        """Used to enable/disable a service. For compute services, setting to
        disabled stops new builds arriving on that host.
        :param host_name: the name of the host machine that the service is
            running
        :param binary: The name of the executable that the service runs as
        :param params_to_update: eg. {'disabled': True}
        :returns: the service reference
        """
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.service_update(
            ctxt, cell_name, host_name, binary, params_to_update)
        service = response.value_or_raise()
        service = cells_utils.add_cell_to_service(service, response.cell_name)
        return service
    def service_delete(self, ctxt, cell_service_id):
        """Deletes the specified service."""
        cell_name, service_id = cells_utils.split_cell_and_item(
            cell_service_id)
        self.msg_runner.service_delete(ctxt, cell_name, service_id)
    @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency)
    def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout):
        """Proxy an RPC message as-is to a manager."""
        compute_topic = compute_rpcapi.RPC_TOPIC
        # Topic has the form '<compute_topic>.<cell!name@host>'; strip
        # the prefix plus the separating dot before splitting.
        cell_and_host = topic[len(compute_topic) + 1:]
        cell_name, host_name = cells_utils.split_cell_and_item(cell_and_host)
        response = self.msg_runner.proxy_rpc_to_manager(ctxt, cell_name,
                host_name, topic, rpc_message, call, timeout)
        return response.value_or_raise()
    def task_log_get_all(self, ctxt, task_name, period_beginning,
                         period_ending, host=None, state=None):
        """Get task logs from the DB from all cells or a particular
        cell.
        If 'host' is not None, host will be of the format 'cell!name@host',
        with '@host' being optional. The query will be directed to the
        appropriate cell and return all task logs, or task logs matching
        the host if specified.
        'state' also may be None. If it's not, filter by the state as well.
        """
        if host is None:
            cell_name = None
        else:
            cell_name, host = cells_utils.split_cell_and_item(host)
            # If no cell name was given, assume that the host name is the
            # cell_name and that the target is all hosts
            if cell_name is None:
                cell_name, host = host, cell_name
        responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
                task_name, period_beginning, period_ending,
                host=host, state=state)
        # 1 response per cell. Each response is a list of task log
        # entries.
        ret_task_logs = []
        for response in responses:
            task_logs = response.value_or_raise()
            for task_log in task_logs:
                cells_utils.add_cell_to_task_log(task_log,
                                                 response.cell_name)
                ret_task_logs.append(task_log)
        return ret_task_logs
    @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency)
    def compute_node_get(self, ctxt, compute_id):
        """Get a compute node by ID or UUID in a specific cell."""
        cell_name, compute_id = cells_utils.split_cell_and_item(
            compute_id)
        response = self.msg_runner.compute_node_get(ctxt, cell_name,
                                                    compute_id)
        node = response.value_or_raise()
        node = cells_utils.add_cell_to_compute_node(node, cell_name)
        return node
    def compute_node_get_all(self, ctxt, hypervisor_match=None):
        """Return list of compute nodes in all cells."""
        responses = self.msg_runner.compute_node_get_all(ctxt,
                hypervisor_match=hypervisor_match)
        # 1 response per cell. Each response is a list of compute_node
        # entries.
        ret_nodes = []
        for response in responses:
            nodes = response.value_or_raise()
            for node in nodes:
                node = cells_utils.add_cell_to_compute_node(node,
                                                            response.cell_name)
                ret_nodes.append(node)
        return ret_nodes
    def compute_node_stats(self, ctxt):
        """Return compute node stats totals from all cells."""
        responses = self.msg_runner.compute_node_stats(ctxt)
        totals = {}
        # Sum each stat key across every cell's response.
        for response in responses:
            data = response.value_or_raise()
            for key, val in data.items():
                totals.setdefault(key, 0)
                totals[key] += val
        return totals
    def actions_get(self, ctxt, cell_name, instance_uuid):
        # Fetch all actions for an instance from the given cell.
        response = self.msg_runner.actions_get(ctxt, cell_name, instance_uuid)
        return response.value_or_raise()
    def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
                                 request_id):
        # Fetch a single instance action matching request_id from a cell.
        response = self.msg_runner.action_get_by_request_id(ctxt, cell_name,
                                                            instance_uuid,
                                                            request_id)
        return response.value_or_raise()
    def action_events_get(self, ctxt, cell_name, action_id):
        # Fetch the events recorded for an instance action from a cell.
        response = self.msg_runner.action_events_get(ctxt, cell_name,
                                                     action_id)
        return response.value_or_raise()
    def consoleauth_delete_tokens(self, ctxt, instance_uuid):
        """Delete consoleauth tokens for an instance in API cells."""
        self.msg_runner.consoleauth_delete_tokens(ctxt, instance_uuid)
    def validate_console_port(self, ctxt, instance_uuid, console_port,
                              console_type):
        """Validate console port with child cell compute node."""
        instance = objects.Instance.get_by_uuid(ctxt, instance_uuid)
        if not instance.cell_name:
            raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
        response = self.msg_runner.validate_console_port(ctxt,
                instance.cell_name, instance_uuid, console_port,
                console_type)
        return response.value_or_raise()
    def get_capacities(self, ctxt, cell_name):
        # Return capacity info tracked by the state manager for a cell.
        return self.state_manager.get_capacities(cell_name)
    def get_migrations(self, ctxt, filters):
        """Fetch migrations applying the filters."""
        target_cell = None
        if "cell_name" in filters:
            _path_cell_sep = cells_utils.PATH_CELL_SEP
            # Build the full routing path '<our cell><sep><child cell>'.
            target_cell = '%s%s%s' % (CONF.cells.name, _path_cell_sep,
                                      filters['cell_name'])
        responses = self.msg_runner.get_migrations(ctxt, target_cell,
                                                   False, filters)
        migrations = []
        for response in responses:
            # response.value_or_raise returns MigrationList objects.
            # MigrationList.objects returns the list of Migration objects.
            migrations.extend(response.value_or_raise().objects)
        return objects.MigrationList(objects=migrations)
    def start_instance(self, ctxt, instance):
        """Start an instance in its cell."""
        self.msg_runner.start_instance(ctxt, instance)
    def stop_instance(self, ctxt, instance, do_cast=True,
                      clean_shutdown=True):
        """Stop an instance in its cell."""
        response = self.msg_runner.stop_instance(ctxt, instance,
                                                 do_cast=do_cast,
                                                 clean_shutdown=clean_shutdown)
        if not do_cast:
            return response.value_or_raise()
    def cell_create(self, ctxt, values):
        # Create a new cell record via the state manager.
        return self.state_manager.cell_create(ctxt, values)
    def cell_update(self, ctxt, cell_name, values):
        # Update an existing cell record via the state manager.
        return self.state_manager.cell_update(ctxt, cell_name, values)
    def cell_delete(self, ctxt, cell_name):
        # Delete a cell record via the state manager.
        return self.state_manager.cell_delete(ctxt, cell_name)
    def cell_get(self, ctxt, cell_name):
        # Fetch a cell record via the state manager.
        return self.state_manager.cell_get(ctxt, cell_name)
    def reboot_instance(self, ctxt, instance, reboot_type):
        """Reboot an instance in its cell."""
        self.msg_runner.reboot_instance(ctxt, instance, reboot_type)
    def pause_instance(self, ctxt, instance):
        """Pause an instance in its cell."""
        self.msg_runner.pause_instance(ctxt, instance)
    def unpause_instance(self, ctxt, instance):
        """Unpause an instance in its cell."""
        self.msg_runner.unpause_instance(ctxt, instance)
    def suspend_instance(self, ctxt, instance):
        """Suspend an instance in its cell."""
        self.msg_runner.suspend_instance(ctxt, instance)
    def resume_instance(self, ctxt, instance):
        """Resume an instance in its cell."""
        self.msg_runner.resume_instance(ctxt, instance)
    def terminate_instance(self, ctxt, instance, delete_type='delete'):
        """Delete an instance in its cell."""
        # NOTE(rajesht): The `delete_type` parameter is passed so that it will
        # be routed to destination cell, where instance deletion will happen.
        self.msg_runner.terminate_instance(ctxt, instance,
                                           delete_type=delete_type)
    def soft_delete_instance(self, ctxt, instance):
        """Soft-delete an instance in its cell."""
        self.msg_runner.soft_delete_instance(ctxt, instance)
    def resize_instance(self, ctxt, instance, flavor,
                        extra_instance_updates,
                        clean_shutdown=True):
        """Resize an instance in its cell."""
        self.msg_runner.resize_instance(ctxt, instance,
                                        flavor, extra_instance_updates,
                                        clean_shutdown=clean_shutdown)
    def live_migrate_instance(self, ctxt, instance, block_migration,
                              disk_over_commit, host_name):
        """Live migrate an instance in its cell."""
        self.msg_runner.live_migrate_instance(ctxt, instance,
                                              block_migration,
                                              disk_over_commit,
                                              host_name)
    def revert_resize(self, ctxt, instance):
        """Revert a resize for an instance in its cell."""
        self.msg_runner.revert_resize(ctxt, instance)
    def confirm_resize(self, ctxt, instance):
        """Confirm a resize for an instance in its cell."""
        self.msg_runner.confirm_resize(ctxt, instance)
    def reset_network(self, ctxt, instance):
        """Reset networking for an instance in its cell."""
        self.msg_runner.reset_network(ctxt, instance)
    def inject_network_info(self, ctxt, instance):
        """Inject networking for an instance in its cell."""
        self.msg_runner.inject_network_info(ctxt, instance)
    def snapshot_instance(self, ctxt, instance, image_id):
        """Snapshot an instance in its cell."""
        self.msg_runner.snapshot_instance(ctxt, instance, image_id)
    def backup_instance(self, ctxt, instance, image_id, backup_type, rotation):
        """Backup an instance in its cell."""
        self.msg_runner.backup_instance(ctxt, instance, image_id,
                                        backup_type, rotation)
    def rebuild_instance(self, ctxt, instance, image_href, admin_password,
                         files_to_inject, preserve_ephemeral, kwargs):
        """Rebuild an instance in its cell."""
        self.msg_runner.rebuild_instance(ctxt, instance, image_href,
                                         admin_password, files_to_inject,
                                         preserve_ephemeral, kwargs)
    def set_admin_password(self, ctxt, instance, new_pass):
        """Set the admin password on an instance in its cell."""
        self.msg_runner.set_admin_password(ctxt, instance, new_pass)
File diff suppressed because it is too large Load Diff
-28
View File
@@ -1,28 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Global cells config options
"""
import nova.conf
CONF = nova.conf.CONF
def get_cell_type():
    """Return the cell type ('api' or 'compute'), or None when the
    cells feature is disabled.
    """
    if CONF.cells.enable:
        return CONF.cells.cell_type
    return None
-168
View File
@@ -1,168 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells RPC Communication Driver
"""
import oslo_messaging as messaging
from nova.cells import driver
import nova.conf
from nova import rpc
CONF = nova.conf.CONF
class CellsRPCDriver(driver.BaseCellsDriver):
    """RPC implementation of cell<->cell communication.

    Sets up the RPC consumers and sends messages to other cells. One
    instance exists for every neighbor cell found in the DB (attached
    to that cell's CellState); the cells manager creates one more to
    set up the consumers.
    """
    def __init__(self, *args, **kwargs):
        super(CellsRPCDriver, self).__init__(*args, **kwargs)
        self.intercell_rpcapi = InterCellRPCAPI()
        self.rpc_servers = []
    def start_servers(self, msg_runner):
        """Start the inter-cell RPC servers.

        Separate servers are started per message type so requests and
        replies never share a queue; sharing one could exhaust the RPC
        thread pool while waiting on replies, deadlocking the service.
        """
        base_topic = CONF.cells.rpc_driver_queue_base
        dispatcher = InterCellRPCDispatcher(msg_runner)
        for message_type in msg_runner.get_message_types():
            topic = '%s.%s' % (base_topic, message_type)
            target = messaging.Target(topic=topic, server=CONF.host)
            # No object serializer is needed here: object serialization
            # is already handled by the nova.cells.messaging module.
            server = rpc.get_server(target, endpoints=[dispatcher])
            server.start()
            self.rpc_servers.append(server)
    def stop_servers(self):
        """Stop every server started by start_servers().

        Not currently invoked anywhere: services have no stop hook that
        lets managers clean up.
        """
        for server in self.rpc_servers:
            server.stop()
    def send_message_to_cell(self, cell_state, message):
        """Deliver *message* to *cell_state* via the inter-cell RPC API."""
        self.intercell_rpcapi.send_message_to_cell(cell_state, message)
class InterCellRPCAPI(object):
    """Client side of the Cell<->Cell RPC API.

    The CellsRPCDriver uses this to make calls to another cell.

    API version history:

        1.0 - Initial version.

        ... Grizzly supports message version 1.0.  So, any changes to
        existing methods in 2.x after that point should be done such
        that they can handle the version_cap being set to 1.0.
    """

    VERSION_ALIASES = {
        'grizzly': '1.0',
    }

    def __init__(self):
        super(InterCellRPCAPI, self).__init__()
        # Resolve a release alias (e.g. 'grizzly') to a version number;
        # an explicit version string passes through unchanged.
        configured_cap = CONF.upgrade_levels.intercell
        self.version_cap = self.VERSION_ALIASES.get(configured_cap,
                                                    configured_cap)
        # Cache of transport_url -> Transport (see _get_transport).
        self.transports = {}

    def _get_client(self, next_hop, topic):
        """Turn the DB information for a cell into a messaging.RPCClient."""
        rpc_target = messaging.Target(topic=topic, version='1.0')
        return messaging.RPCClient(
            self._get_transport(next_hop),
            rpc_target,
            version_cap=self.version_cap,
            serializer=rpc.RequestContextSerializer(None))

    def _get_transport(self, next_hop):
        """Return a (cached) Transport for the next hop's transport_url.

        NOTE(belliott) Each Transport object contains connection pool
        state.  Maintain references to them to avoid continual reconnects
        to the message broker.
        """
        url = next_hop.db_info['transport_url']
        if url not in self.transports:
            self.transports[url] = messaging.get_rpc_transport(
                nova.conf.CONF, url)
        return self.transports[url]

    def send_message_to_cell(self, cell_state, message):
        """Cast a JSON-ified message to another cell's 'process_message'.

        Honours the message's fanout flag.  The topic that is used will
        be 'CONF.rpc_driver_queue_base.<message_type>'.
        """
        topic = '%s.%s' % (CONF.cells.rpc_driver_queue_base,
                           message.message_type)
        cctxt = self._get_client(cell_state, topic)
        if message.fanout:
            cctxt = cctxt.prepare(fanout=message.fanout)
        return cctxt.cast(message.ctxt, 'process_message',
                          message=message.to_json())
class InterCellRPCDispatcher(object):
    """RPC Dispatcher to handle messages received from other cells.

    Every message arriving here was sent by a sibling cell.  Depending
    on the ultimate target and type of message, we may process the
    message in this cell, relay it to another sibling cell, or both.
    That logic is defined by the message class in the
    nova.cells.messaging module.
    """

    target = messaging.Target(version='1.0')

    def __init__(self, msg_runner):
        """Init the Intercell RPC Dispatcher."""
        self.msg_runner = msg_runner

    def process_message(self, _ctxt, message):
        """Handle a message cast from another cell.

        The JSON payload is turned back into an instance of the correct
        Message class by the MessageRunner, then processed.
        """
        self.msg_runner.message_from_json(message).process()
-568
View File
@@ -1,568 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of nova-cells RPC API (for talking to the nova-cells service
within a cell).
This is different than communication between child and parent nova-cells
services. That communication is handled by the cells driver via the
messaging module.
"""
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from nova import cells
import nova.conf
from nova import exception
from nova.objects import base as objects_base
from nova import profiler
from nova import rpc
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
@profiler.trace_cls("rpc")
class CellsAPI(object):
    '''Cells client-side RPC API (API cell -> nova-cells service).

    Each method prepares the client at the lowest RPC version that
    supports the message, downgrading the payload where older versions
    require primitives instead of objects.

    API version history:

        * 1.0 - Initial version.
        * 1.1 - Adds get_cell_info_for_neighbors() and sync_instances()
        * 1.2 - Adds service_get_all(), service_get_by_compute_host(),
                and proxy_rpc_to_compute_manager()
        * 1.3 - Adds task_log_get_all()
        * 1.4 - Adds compute_node_get(), compute_node_get_all(), and
                compute_node_stats()
        * 1.5 - Adds actions_get(), action_get_by_request_id(), and
                action_events_get()
        * 1.6 - Adds consoleauth_delete_tokens() and validate_console_port()

        ... Grizzly supports message version 1.6.  So, any changes to existing
        methods in 2.x after that point should be done such that they can
        handle the version_cap being set to 1.6.

        * 1.7 - Adds service_update()
        * 1.8 - Adds build_instances(), deprecates schedule_run_instance()
        * 1.9 - Adds get_capacities()
        * 1.10 - Adds bdm_update_or_create_at_top(), and bdm_destroy_at_top()
        * 1.11 - Adds get_migrations()
        * 1.12 - Adds instance_start() and instance_stop()
        * 1.13 - Adds cell_create(), cell_update(), cell_delete(), and
                 cell_get()
        * 1.14 - Adds reboot_instance()
        * 1.15 - Adds suspend_instance() and resume_instance()
        * 1.16 - Adds instance_update_from_api()
        * 1.17 - Adds get_host_uptime()
        * 1.18 - Adds terminate_instance() and soft_delete_instance()
        * 1.19 - Adds pause_instance() and unpause_instance()
        * 1.20 - Adds resize_instance() and live_migrate_instance()
        * 1.21 - Adds revert_resize() and confirm_resize()
        * 1.22 - Adds reset_network()
        * 1.23 - Adds inject_network_info()
        * 1.24 - Adds backup_instance() and snapshot_instance()

        ... Havana supports message version 1.24.  So, any changes to existing
        methods in 1.x after that point should be done such that they can
        handle the version_cap being set to 1.24.

        * 1.25 - Adds rebuild_instance()
        * 1.26 - Adds service_delete()
        * 1.27 - Updates instance_delete_everywhere() for instance objects

        ... Icehouse supports message version 1.27.  So, any changes to
        existing methods in 1.x after that point should be done such that they
        can handle the version_cap being set to 1.27.

        * 1.28 - Make bdm_update_or_create_at_top and use bdm objects
        * 1.29 - Adds set_admin_password()

        ... Juno supports message version 1.29.  So, any changes to
        existing methods in 1.x after that point should be done such that they
        can handle the version_cap being set to 1.29.

        * 1.30 - Make build_instances() use flavor object
        * 1.31 - Add clean_shutdown to stop, resize, rescue, and shelve
        * 1.32 - Send objects for instances in build_instances()
        * 1.33 - Add clean_shutdown to resize_instance()
        * 1.34 - build_instances uses BlockDeviceMapping objects, drops
                 legacy_bdm argument

        ... Kilo supports message version 1.34.  So, any changes to
        existing methods in 1.x after that point should be done such that they
        can handle the version_cap being set to 1.34.

        * 1.35 - Make instance_update_at_top, instance_destroy_at_top
                 and instance_info_cache_update_at_top use instance objects
        * 1.36 - Added 'delete_type' parameter to terminate_instance()
        * 1.37 - Add get_keypair_at_top to fetch keypair from api cell

        ... Liberty, Mitaka, Newton, and Ocata support message version 1.37.
        So, any changes to existing methods in 1.x after that point should be
        done such that they can handle the version_cap being set to
        1.37.

        * 1.38 - Handle uuid parameter in compute_node_get() method.
    '''

    # Maps a release name (usable in CONF.upgrade_levels.cells) to the
    # highest message version that release understands.
    VERSION_ALIASES = {
        'grizzly': '1.6',
        'havana': '1.24',
        'icehouse': '1.27',
        'juno': '1.29',
        'kilo': '1.34',
        'liberty': '1.37',
        'mitaka': '1.37',
        'newton': '1.37',
        'ocata': '1.37',
    }

    def __init__(self):
        """Build the RPC client, honouring any configured version cap.

        CONF.upgrade_levels.cells may be a release alias (resolved via
        VERSION_ALIASES) or an explicit version string (passed through
        unchanged by the .get() fallback).
        """
        super(CellsAPI, self).__init__()
        target = messaging.Target(topic=cells.TOPIC, version='1.0')
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.cells,
                                               CONF.upgrade_levels.cells)
        # NOTE(sbauza): Yes, this is ugly but cells_utils is calling cells.db
        # which itself calls cells.rpcapi... You meant import cycling ? Gah.
        from nova.cells import utils as cells_utils
        serializer = cells_utils.ProxyObjectSerializer()
        self.client = rpc.get_client(target,
                                     version_cap=version_cap,
                                     serializer=serializer)

    def cast_compute_api_method(self, ctxt, cell_name, method,
                                *args, **kwargs):
        """Make a cast to a compute API method in a certain cell."""
        method_info = {'method': method,
                       'method_args': args,
                       'method_kwargs': kwargs}
        self.client.cast(ctxt, 'run_compute_api_method',
                         cell_name=cell_name,
                         method_info=method_info,
                         call=False)

    def call_compute_api_method(self, ctxt, cell_name, method,
                                *args, **kwargs):
        """Make a call to a compute API method in a certain cell."""
        method_info = {'method': method,
                       'method_args': args,
                       'method_kwargs': kwargs}
        return self.client.call(ctxt, 'run_compute_api_method',
                                cell_name=cell_name,
                                method_info=method_info,
                                call=True)

    def build_instances(self, ctxt, **kwargs):
        """Build instances.

        Negotiates the highest version the server side can accept,
        converting objects to primitives as required by older versions.
        """
        build_inst_kwargs = kwargs
        instances = build_inst_kwargs['instances']
        build_inst_kwargs['image'] = jsonutils.to_primitive(
            build_inst_kwargs['image'])
        version = '1.34'
        if self.client.can_send_version('1.34'):
            # 1.34 dropped the legacy_bdm argument.
            build_inst_kwargs.pop('legacy_bdm', None)
        else:
            # Pre-1.34 expects primitive block device mappings.
            bdm_p = objects_base.obj_to_primitive(
                build_inst_kwargs['block_device_mapping'])
            build_inst_kwargs['block_device_mapping'] = bdm_p
            version = '1.32'
        if not self.client.can_send_version('1.32'):
            # Pre-1.32 expects primitive instances, not objects.
            instances_p = [jsonutils.to_primitive(inst) for inst in instances]
            build_inst_kwargs['instances'] = instances_p
            version = '1.30'
        if not self.client.can_send_version('1.30'):
            # Pre-1.30 expects a primitive flavor in filter_properties.
            if 'filter_properties' in build_inst_kwargs:
                filter_properties = build_inst_kwargs['filter_properties']
                flavor = filter_properties['instance_type']
                flavor_p = objects_base.obj_to_primitive(flavor)
                filter_properties['instance_type'] = flavor_p
            version = '1.8'
        cctxt = self.client.prepare(version=version)
        cctxt.cast(ctxt, 'build_instances',
                   build_inst_kwargs=build_inst_kwargs)

    def instance_delete_everywhere(self, ctxt, instance, delete_type):
        """Delete instance everywhere.  delete_type may be 'soft'
        or 'hard'.  This is generally only used to resolve races
        when API cell doesn't know to what cell an instance belongs.
        """
        if self.client.can_send_version('1.27'):
            version = '1.27'
        else:
            version = '1.0'
            instance = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(version=version)
        cctxt.cast(ctxt, 'instance_delete_everywhere', instance=instance,
                   delete_type=delete_type)

    def get_cell_info_for_neighbors(self, ctxt):
        """Get information about our neighbor cells from the manager.

        Returns an empty list when cells support is disabled.
        """
        if not CONF.cells.enable:
            return []
        cctxt = self.client.prepare(version='1.1')
        return cctxt.call(ctxt, 'get_cell_info_for_neighbors')

    def sync_instances(self, ctxt, project_id=None, updated_since=None,
                       deleted=False):
        """Ask all cells to sync instance data."""
        cctxt = self.client.prepare(version='1.1')
        return cctxt.cast(ctxt, 'sync_instances',
                          project_id=project_id,
                          updated_since=updated_since,
                          deleted=deleted)

    def service_get_all(self, ctxt, filters=None):
        """Ask all cells for their list of services."""
        cctxt = self.client.prepare(version='1.2')
        return cctxt.call(ctxt, 'service_get_all', filters=filters)

    def service_get_by_compute_host(self, ctxt, host_name):
        """Get the service entry for a host in a particular cell.  The
        cell name should be encoded within the host_name.
        """
        cctxt = self.client.prepare(version='1.2')
        return cctxt.call(ctxt, 'service_get_by_compute_host',
                          host_name=host_name)

    def get_host_uptime(self, context, host_name):
        """Gets the host uptime in a particular cell.  The cell name should
        be encoded within the host_name
        """
        cctxt = self.client.prepare(version='1.17')
        return cctxt.call(context, 'get_host_uptime', host_name=host_name)

    def service_update(self, ctxt, host_name, binary, params_to_update):
        """Used to enable/disable a service.  For compute services, setting to
        disabled stops new builds arriving on that host.

        :param host_name: the name of the host machine that the service is
                          running
        :param binary: The name of the executable that the service runs as
        :param params_to_update: eg. {'disabled': True}
        """
        cctxt = self.client.prepare(version='1.7')
        return cctxt.call(ctxt, 'service_update',
                          host_name=host_name,
                          binary=binary,
                          params_to_update=params_to_update)

    def service_delete(self, ctxt, cell_service_id):
        """Deletes the specified service."""
        cctxt = self.client.prepare(version='1.26')
        cctxt.call(ctxt, 'service_delete',
                   cell_service_id=cell_service_id)

    def proxy_rpc_to_manager(self, ctxt, rpc_message, topic, call=False,
                             timeout=None):
        """Proxy RPC to a compute manager.  The host in the topic
        should be encoded with the target cell name.
        """
        cctxt = self.client.prepare(version='1.2', timeout=timeout)
        return cctxt.call(ctxt, 'proxy_rpc_to_manager',
                          topic=topic,
                          rpc_message=rpc_message,
                          call=call)

    def task_log_get_all(self, ctxt, task_name, period_beginning,
                         period_ending, host=None, state=None):
        """Get the task logs from the DB in child cells."""
        cctxt = self.client.prepare(version='1.3')
        return cctxt.call(ctxt, 'task_log_get_all',
                          task_name=task_name,
                          period_beginning=period_beginning,
                          period_ending=period_ending,
                          host=host, state=state)

    def compute_node_get(self, ctxt, compute_id):
        """Get a compute node by ID or UUID in a specific cell.

        UUID lookups require message version 1.38; if the cap is lower,
        raise ComputeHostNotFound rather than send a request the server
        cannot understand.
        """
        version = '1.38'
        if uuidutils.is_uuid_like(compute_id):
            if not self.client.can_send_version(version):
                LOG.warning('Unable to get compute node by UUID %s; service '
                            'is too old or the version is capped.', compute_id)
                raise exception.ComputeHostNotFound(host=compute_id)
        else:
            version = '1.4'
        cctxt = self.client.prepare(version=version)
        return cctxt.call(ctxt, 'compute_node_get', compute_id=compute_id)

    def compute_node_get_all(self, ctxt, hypervisor_match=None):
        """Return list of compute nodes in all cells, optionally
        filtering by hypervisor host.
        """
        cctxt = self.client.prepare(version='1.4')
        return cctxt.call(ctxt, 'compute_node_get_all',
                          hypervisor_match=hypervisor_match)

    def compute_node_stats(self, ctxt):
        """Return compute node stats from all cells."""
        cctxt = self.client.prepare(version='1.4')
        return cctxt.call(ctxt, 'compute_node_stats')

    def actions_get(self, ctxt, instance):
        """Get actions for an instance from its cell.

        :raises: InstanceUnknownCell if the instance's cell is not set.
        """
        if not instance['cell_name']:
            raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
        cctxt = self.client.prepare(version='1.5')
        return cctxt.call(ctxt, 'actions_get',
                          cell_name=instance['cell_name'],
                          instance_uuid=instance['uuid'])

    def action_get_by_request_id(self, ctxt, instance, request_id):
        """Get an instance action by request id from the instance's cell.

        :raises: InstanceUnknownCell if the instance's cell is not set.
        """
        if not instance['cell_name']:
            raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
        cctxt = self.client.prepare(version='1.5')
        return cctxt.call(ctxt, 'action_get_by_request_id',
                          cell_name=instance['cell_name'],
                          instance_uuid=instance['uuid'],
                          request_id=request_id)

    def action_events_get(self, ctxt, instance, action_id):
        """Get events for an instance action from the instance's cell.

        :raises: InstanceUnknownCell if the instance's cell is not set.
        """
        if not instance['cell_name']:
            raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
        cctxt = self.client.prepare(version='1.5')
        return cctxt.call(ctxt, 'action_events_get',
                          cell_name=instance['cell_name'],
                          action_id=action_id)

    def consoleauth_delete_tokens(self, ctxt, instance_uuid):
        """Delete consoleauth tokens for an instance in API cells."""
        cctxt = self.client.prepare(version='1.6')
        cctxt.cast(ctxt, 'consoleauth_delete_tokens',
                   instance_uuid=instance_uuid)

    def validate_console_port(self, ctxt, instance_uuid, console_port,
                              console_type):
        """Validate console port with child cell compute node."""
        cctxt = self.client.prepare(version='1.6')
        return cctxt.call(ctxt, 'validate_console_port',
                          instance_uuid=instance_uuid,
                          console_port=console_port,
                          console_type=console_type)

    def get_capacities(self, ctxt, cell_name=None):
        """Get capacity info, optionally for a single named cell."""
        cctxt = self.client.prepare(version='1.9')
        return cctxt.call(ctxt, 'get_capacities', cell_name=cell_name)

    def get_migrations(self, ctxt, filters):
        """Get all migrations applying the filters."""
        cctxt = self.client.prepare(version='1.11')
        return cctxt.call(ctxt, 'get_migrations', filters=filters)

    def start_instance(self, ctxt, instance):
        """Start an instance in its cell.

        This method takes a new-world instance object.
        """
        cctxt = self.client.prepare(version='1.12')
        cctxt.cast(ctxt, 'start_instance', instance=instance)

    def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True):
        """Stop an instance in its cell.

        This method takes a new-world instance object.  When do_cast is
        False, a blocking call is made instead of a cast.
        """
        msg_args = {'instance': instance,
                    'do_cast': do_cast}
        if self.client.can_send_version('1.31'):
            version = '1.31'
            msg_args['clean_shutdown'] = clean_shutdown
        else:
            # Pre-1.31 has no clean_shutdown argument.
            version = '1.12'
        cctxt = self.client.prepare(version=version)
        method = do_cast and cctxt.cast or cctxt.call
        return method(ctxt, 'stop_instance', **msg_args)

    def cell_create(self, ctxt, values):
        """Create a cell record from the given values dict."""
        cctxt = self.client.prepare(version='1.13')
        return cctxt.call(ctxt, 'cell_create', values=values)

    def cell_update(self, ctxt, cell_name, values):
        """Update the named cell record with the given values dict."""
        cctxt = self.client.prepare(version='1.13')
        return cctxt.call(ctxt, 'cell_update',
                          cell_name=cell_name, values=values)

    def cell_delete(self, ctxt, cell_name):
        """Delete the named cell record."""
        cctxt = self.client.prepare(version='1.13')
        return cctxt.call(ctxt, 'cell_delete', cell_name=cell_name)

    def cell_get(self, ctxt, cell_name):
        """Get the named cell record."""
        cctxt = self.client.prepare(version='1.13')
        return cctxt.call(ctxt, 'cell_get', cell_name=cell_name)

    def reboot_instance(self, ctxt, instance, block_device_info,
                        reboot_type):
        """Reboot an instance in its cell.

        This method takes a new-world instance object.

        NOTE(review): block_device_info is accepted but not forwarded in
        the cast below -- presumably recomputed on the far side; confirm
        this is intentional before relying on it.
        """
        cctxt = self.client.prepare(version='1.14')
        cctxt.cast(ctxt, 'reboot_instance', instance=instance,
                   reboot_type=reboot_type)

    def pause_instance(self, ctxt, instance):
        """Pause an instance in its cell.

        This method takes a new-world instance object.
        """
        cctxt = self.client.prepare(version='1.19')
        cctxt.cast(ctxt, 'pause_instance', instance=instance)

    def unpause_instance(self, ctxt, instance):
        """Unpause an instance in its cell.

        This method takes a new-world instance object.
        """
        cctxt = self.client.prepare(version='1.19')
        cctxt.cast(ctxt, 'unpause_instance', instance=instance)

    def suspend_instance(self, ctxt, instance):
        """Suspend an instance in its cell.

        This method takes a new-world instance object.
        """
        cctxt = self.client.prepare(version='1.15')
        cctxt.cast(ctxt, 'suspend_instance', instance=instance)

    def resume_instance(self, ctxt, instance):
        """Resume an instance in its cell.

        This method takes a new-world instance object.
        """
        cctxt = self.client.prepare(version='1.15')
        cctxt.cast(ctxt, 'resume_instance', instance=instance)

    def terminate_instance(self, ctxt, instance, bdms, reservations=None,
                           delete_type='delete'):
        """Delete an instance in its cell.

        This method takes a new-world instance object.

        NOTE(review): bdms and reservations are accepted for interface
        compatibility but are not sent over the wire.
        """
        msg_kwargs = {'instance': instance}
        if self.client.can_send_version('1.36'):
            version = '1.36'
            msg_kwargs['delete_type'] = delete_type
        else:
            version = '1.18'
        cctxt = self.client.prepare(version=version)
        cctxt.cast(ctxt, 'terminate_instance', **msg_kwargs)

    def soft_delete_instance(self, ctxt, instance, reservations=None):
        """Soft-delete an instance in its cell.

        This method takes a new-world instance object.

        NOTE(review): reservations is accepted for interface
        compatibility but is not sent over the wire.
        """
        cctxt = self.client.prepare(version='1.18')
        cctxt.cast(ctxt, 'soft_delete_instance', instance=instance)

    def resize_instance(self, ctxt, instance, extra_instance_updates,
                        scheduler_hint, flavor, reservations=None,
                        clean_shutdown=True,
                        request_spec=None):
        """Resize an instance in its cell."""
        # NOTE(sbauza): Since Cells v1 is quite feature-frozen, we don't want
        # to pass down request_spec to the manager and rather keep the
        # cell conductor providing a new RequestSpec like the original
        # behaviour
        flavor_p = jsonutils.to_primitive(flavor)
        version = '1.33'
        msg_args = {'instance': instance,
                    'flavor': flavor_p,
                    'extra_instance_updates': extra_instance_updates,
                    'clean_shutdown': clean_shutdown}
        if not self.client.can_send_version(version):
            # Pre-1.33 has no clean_shutdown argument.
            del msg_args['clean_shutdown']
            version = '1.20'
        cctxt = self.client.prepare(version=version)
        cctxt.cast(ctxt, 'resize_instance', **msg_args)

    def live_migrate_instance(self, ctxt, instance, host_name,
                              block_migration, disk_over_commit,
                              request_spec=None):
        """Live-migrate an instance in its cell."""
        # NOTE(sbauza): Since Cells v1 is quite feature-freeze, we don't want
        # to pass down request_spec to the manager and rather keep the
        # cell conductor providing a new RequestSpec like the original
        # behaviour
        cctxt = self.client.prepare(version='1.20')
        cctxt.cast(ctxt, 'live_migrate_instance',
                   instance=instance,
                   block_migration=block_migration,
                   disk_over_commit=disk_over_commit,
                   host_name=host_name)

    def revert_resize(self, ctxt, instance, migration, host,
                      reservations=None):
        """Revert a resize in the instance's cell.

        NOTE(review): migration, host and reservations are accepted for
        interface compatibility but not sent over the wire.
        """
        cctxt = self.client.prepare(version='1.21')
        cctxt.cast(ctxt, 'revert_resize', instance=instance)

    def confirm_resize(self, ctxt, instance, migration, host,
                       reservations=None, cast=True):
        """Confirm a resize in the instance's cell."""
        # NOTE(comstud): This is only used in the API cell where we should
        # always cast and ignore the 'cast' kwarg.
        # Also, the compute api method normally takes an optional
        # 'migration_ref' argument.  But this is only used from the manager
        # back to the API... which would happen in the child cell.
        cctxt = self.client.prepare(version='1.21')
        cctxt.cast(ctxt, 'confirm_resize', instance=instance)

    def reset_network(self, ctxt, instance):
        """Reset networking for an instance."""
        cctxt = self.client.prepare(version='1.22')
        cctxt.cast(ctxt, 'reset_network', instance=instance)

    def inject_network_info(self, ctxt, instance):
        """Inject networking for an instance."""
        cctxt = self.client.prepare(version='1.23')
        cctxt.cast(ctxt, 'inject_network_info', instance=instance)

    def snapshot_instance(self, ctxt, instance, image_id):
        """Snapshot an instance in its cell to the given image."""
        cctxt = self.client.prepare(version='1.24')
        cctxt.cast(ctxt, 'snapshot_instance',
                   instance=instance, image_id=image_id)

    def backup_instance(self, ctxt, instance, image_id, backup_type, rotation):
        """Back up an instance in its cell."""
        cctxt = self.client.prepare(version='1.24')
        cctxt.cast(ctxt, 'backup_instance',
                   instance=instance,
                   image_id=image_id,
                   backup_type=backup_type,
                   rotation=rotation)

    def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
                         image_ref, orig_image_ref, orig_sys_metadata, bdms,
                         recreate=False, on_shared_storage=False, host=None,
                         preserve_ephemeral=False, request_spec=None,
                         kwargs=None):
        """Rebuild an instance in its cell.

        NOTE(review): only a subset of the accepted arguments is
        forwarded; orig_image_ref, orig_sys_metadata, bdms, recreate,
        on_shared_storage and host are dropped from the cast.
        """
        # NOTE(sbauza): Since Cells v1 is quite feature-freeze, we don't want
        # to pass down request_spec to the manager and rather keep the
        # cell conductor providing a new RequestSpec like the original
        # behaviour
        cctxt = self.client.prepare(version='1.25')
        cctxt.cast(ctxt, 'rebuild_instance',
                   instance=instance, image_href=image_ref,
                   admin_password=new_pass, files_to_inject=injected_files,
                   preserve_ephemeral=preserve_ephemeral, kwargs=kwargs)

    def set_admin_password(self, ctxt, instance, new_pass):
        """Set the admin password for an instance in its cell."""
        cctxt = self.client.prepare(version='1.29')
        cctxt.cast(ctxt, 'set_admin_password', instance=instance,
                   new_pass=new_pass)
-244
View File
@@ -1,244 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Scheduler
"""
import copy
import time
from oslo_log import log as logging
from six.moves import range
from nova.cells import filters
from nova.cells import weights
from nova import compute
from nova.compute import instance_actions
from nova.compute import vm_states
from nova import conductor
import nova.conf
from nova.db import base
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.scheduler import utils as scheduler_utils
from nova import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class CellsScheduler(base.Base):
    """The cells scheduler.

    Picks a target cell for new instance builds by running the
    configured cell filters and weighers, then either builds locally
    (via the conductor) or forwards the build message to a child cell.
    """

    def __init__(self, msg_runner):
        super(CellsScheduler, self).__init__()
        self.msg_runner = msg_runner
        self.state_manager = msg_runner.state_manager
        self.compute_api = compute.API()
        self.compute_task_api = conductor.ComputeTaskAPI()
        # Instantiate the configured cell filters and weighers once at
        # startup; they are reused for every scheduling request.
        self.filter_handler = filters.CellFilterHandler()
        filter_classes = self.filter_handler.get_matching_classes(
            CONF.cells.scheduler_filter_classes)
        self.filters = [cls() for cls in filter_classes]
        self.weight_handler = weights.CellWeightHandler()
        weigher_classes = self.weight_handler.get_matching_classes(
            CONF.cells.scheduler_weight_classes)
        self.weighers = [cls() for cls in weigher_classes]

    def _create_instances_here(self, ctxt, instance_uuids, instance_properties,
            instance_type, image, security_groups, block_device_mapping):
        """Create instance DB records in this cell for a local build.

        Returns the list of created Instance objects.
        """
        instance_values = copy.copy(instance_properties)
        # The parent may pass these metadata values as lists, and the
        # create call expects it to be a dict.
        instance_values['metadata'] = utils.instance_meta(instance_values)
        # Pop out things that will get set properly when re-creating the
        # instance record.
        instance_values.pop('id')
        instance_values.pop('name')
        instance_values.pop('info_cache')
        instance_values.pop('security_groups')
        instance_values.pop('flavor')
        # FIXME(danms): The instance was brutally serialized before being
        # sent over RPC to us. Thus, the pci_requests value wasn't really
        # sent in a useful form. Since it was getting ignored for cells
        # before it was part of the Instance, skip it now until cells RPC
        # is sending proper instance objects.
        instance_values.pop('pci_requests', None)
        # FIXME(danms): Same for ec2_ids
        instance_values.pop('ec2_ids', None)
        # FIXME(danms): Same for keypairs
        instance_values.pop('keypairs', None)
        instances = []
        num_instances = len(instance_uuids)
        security_groups = (
            self.compute_api.security_group_api.populate_security_groups(
                security_groups))
        for i, instance_uuid in enumerate(instance_uuids):
            instance = objects.Instance(context=ctxt)
            instance.update(instance_values)
            instance.uuid = instance_uuid
            instance.flavor = instance_type
            instance.old_flavor = None
            instance.new_flavor = None
            instance = self.compute_api.create_db_entry_for_new_instance(
                    ctxt,
                    instance_type,
                    image,
                    instance,
                    security_groups,
                    block_device_mapping,
                    num_instances, i)
            block_device_mapping = (
                self.compute_api._bdm_validate_set_size_and_instance(
                    ctxt, instance, instance_type, block_device_mapping))
            self.compute_api._create_block_device_mapping(block_device_mapping)
            instances.append(instance)
        return instances

    def _create_action_here(self, ctxt, instance_uuids):
        """Record a CREATE action for each instance, as the conductor
        expects the action to already exist.
        """
        for instance_uuid in instance_uuids:
            objects.InstanceAction.action_start(
                    ctxt,
                    instance_uuid,
                    instance_actions.CREATE,
                    want_result=False)

    def _get_possible_cells(self):
        """Return the candidate cells: all children, plus ourselves when
        we have capacity info (or no children at all).
        """
        cells = self.state_manager.get_child_cells()
        our_cell = self.state_manager.get_my_state()
        # Include our cell in the list, if we have any capacity info
        if not cells or our_cell.capacities:
            cells.append(our_cell)
        return cells

    def _grab_target_cells(self, filter_properties):
        """Filter and weigh candidate cells.

        Returns cells ordered best-first, None when a filter fully
        handled scheduling, or raises NoCellsAvailable when filtering
        eliminated every candidate.
        """
        cells = self._get_possible_cells()
        cells = self.filter_handler.get_filtered_objects(self.filters, cells,
                                                         filter_properties)
        # NOTE(comstud): I know this reads weird, but the 'if's are nested
        # this way to optimize for the common case where 'cells' is a list
        # containing at least 1 entry.
        if not cells:
            if cells is None:
                # None means to bypass further scheduling as a filter
                # took care of everything.
                return
            raise exception.NoCellsAvailable()
        weighted_cells = self.weight_handler.get_weighed_objects(
                self.weighers, cells, filter_properties)
        LOG.debug("Weighted cells: %(weighted_cells)s",
                  {'weighted_cells': weighted_cells})
        target_cells = [cell.obj for cell in weighted_cells]
        return target_cells

    def _build_instances(self, message, target_cells, instance_uuids,
            build_inst_kwargs):
        """Attempt to build instance(s) or send msg to child cell."""
        ctxt = message.ctxt
        instance_properties = obj_base.obj_to_primitive(
            build_inst_kwargs['instances'][0])
        filter_properties = build_inst_kwargs['filter_properties']
        instance_type = filter_properties['instance_type']
        image = build_inst_kwargs['image']
        security_groups = build_inst_kwargs['security_groups']
        block_device_mapping = build_inst_kwargs['block_device_mapping']

        LOG.debug("Building instances with routing_path=%(routing_path)s",
                  {'routing_path': message.routing_path})

        # Try each target cell in (weighed) order until one succeeds.
        for target_cell in target_cells:
            try:
                if target_cell.is_me:
                    # Need to create instance DB entries as the conductor
                    # expects that the instance(s) already exists.
                    instances = self._create_instances_here(ctxt,
                            instance_uuids, instance_properties, instance_type,
                            image, security_groups, block_device_mapping)
                    build_inst_kwargs['instances'] = instances
                    # Need to record the create action in the db as the
                    # conductor expects it to already exist.
                    self._create_action_here(ctxt, instance_uuids)
                    self.compute_task_api.build_instances(ctxt,
                            **build_inst_kwargs)
                    return
                self.msg_runner.build_instances(ctxt, target_cell,
                        build_inst_kwargs)
                return
            except Exception:
                LOG.exception("Couldn't communicate with cell '%s'",
                              target_cell.name)
        # FIXME(comstud): Would be nice to kick this back up so that
        # the parent cell could retry, if we had a parent.
        LOG.error("Couldn't communicate with any cells")
        raise exception.NoCellsAvailable()

    def build_instances(self, message, build_inst_kwargs):
        """Entry point for scheduling a build request from a message."""
        image = build_inst_kwargs['image']
        instance_uuids = [inst['uuid'] for inst in
                build_inst_kwargs['instances']]
        instances = build_inst_kwargs['instances']
        request_spec = scheduler_utils.build_request_spec(image, instances)
        filter_properties = copy.copy(build_inst_kwargs['filter_properties'])
        filter_properties.update({'context': message.ctxt,
                                  'scheduler': self,
                                  'routing_path': message.routing_path,
                                  'host_sched_kwargs': build_inst_kwargs,
                                  'request_spec': request_spec})
        self._schedule_build_to_cells(message, instance_uuids,
                filter_properties, self._build_instances, build_inst_kwargs)

    def _schedule_build_to_cells(self, message, instance_uuids,
            filter_properties, method, method_kwargs):
        """Pick a cell where we should create a new instance(s).

        Retries up to CONF.cells.scheduler_retries times (with a delay)
        when no cells are available; on any other failure, marks the
        instances as ERROR.
        """
        try:
            for i in range(max(0, CONF.cells.scheduler_retries) + 1):
                try:
                    target_cells = self._grab_target_cells(filter_properties)
                    if target_cells is None:
                        # a filter took care of scheduling.  skip.
                        return

                    return method(message, target_cells, instance_uuids,
                            method_kwargs)
                except exception.NoCellsAvailable:
                    if i == max(0, CONF.cells.scheduler_retries):
                        raise
                    sleep_time = max(1, CONF.cells.scheduler_retry_delay)
                    LOG.info("No cells available when scheduling.  Will "
                             "retry in %(sleep_time)s second(s)",
                             {'sleep_time': sleep_time})
                    time.sleep(sleep_time)
                    continue
        except Exception:
            LOG.exception("Error scheduling instances %(instance_uuids)s",
                          {'instance_uuids': instance_uuids})
            ctxt = message.ctxt
            for instance_uuid in instance_uuids:
                instance = objects.Instance(context=ctxt, uuid=instance_uuid,
                                            vm_state=vm_states.ERROR)
                try:
                    # NOTE(review): vm_state was already set in the
                    # constructor above; this reassignment looks redundant
                    # but is kept byte-for-byte for behavior parity.
                    instance.vm_state = vm_states.ERROR
                    instance.save()
                except Exception:
                    # Best effort: the instance may not exist in the DB yet.
                    pass
-499
View File
@@ -1,499 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CellState Manager
"""
import collections
import copy
import datetime
import functools
import time
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from nova.cells import rpc_driver
import nova.conf
from nova import context
from nova.db import base
from nova import exception
from nova import objects
from nova import rpc
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class CellState(object):
    """Holds information for a particular cell."""

    def __init__(self, cell_name, is_me=False):
        """Initialize state for cell *cell_name*.

        :param cell_name: name of the cell this object tracks
        :param is_me: True when this object represents the local cell
        """
        self.name = cell_name
        self.is_me = is_me
        # Timestamp of the most recent capability/capacity update; starts
        # at datetime.min so a fresh cell is considered "never seen".
        self.last_seen = datetime.datetime.min
        self.capabilities = {}
        self.capacities = {}
        # Raw DB row for this cell (minus 'name'); see update_db_info().
        self.db_info = {}
        # TODO(comstud): The DB will specify the driver to use to talk
        # to this cell, but there's no column for this yet.  The only
        # available driver is the rpc driver.
        self.driver = rpc_driver.CellsRPCDriver()

    def update_db_info(self, cell_db_info):
        """Update cell credentials from db."""
        # 'name' is kept separately in self.name, so strip it here.
        self.db_info = {k: v for k, v in cell_db_info.items()
                        if k != 'name'}

    def update_capabilities(self, cell_metadata):
        """Update cell capabilities for a cell."""
        self.last_seen = timeutils.utcnow()
        self.capabilities = cell_metadata

    def update_capacities(self, capacities):
        """Update capacity information for a cell."""
        self.last_seen = timeutils.utcnow()
        self.capacities = capacities

    def get_cell_info(self):
        """Return subset of cell information for OS API use."""
        db_fields_to_return = ['is_parent', 'weight_scale', 'weight_offset']
        url_fields_to_return = {
            'username': 'username',
            'hostname': 'rpc_host',
            'port': 'rpc_port',
        }
        cell_info = dict(name=self.name, capabilities=self.capabilities)
        if self.db_info:
            for field in db_fields_to_return:
                cell_info[field] = self.db_info[field]
            # NOTE(review): assumes db_info always carries a
            # 'transport_url' key when populated -- confirm against the
            # cells DB schema.
            url = rpc.get_transport_url(self.db_info['transport_url'])
            if url.hosts:
                # Only the first host of the transport URL is reported.
                for field, canonical in url_fields_to_return.items():
                    cell_info[canonical] = getattr(url.hosts[0], field)
        return cell_info

    def send_message(self, message):
        """Send a message to a cell.  Just forward this to the driver,
        passing ourselves and the message as arguments.
        """
        self.driver.send_message_to_cell(self, message)

    def __repr__(self):
        me = "me" if self.is_me else "not_me"
        return "Cell '%s' (%s)" % (self.name, me)
def sync_before(f):
    """Decorator: refresh cell data before invoking the wrapped method.

    Intended for methods that *read* cell information; the refresh is a
    no-op unless the periodic sync interval has elapsed.
    """
    @functools.wraps(f)
    def _synced(self, *args, **kwargs):
        # Pull the latest cell info first so the caller sees fresh state.
        self._cell_data_sync()
        return f(self, *args, **kwargs)
    return _synced
def sync_after(f):
    """Decorator: force an immediate cell-data sync after the wrapped
    method runs.

    Intended for methods that *write* cell information, so the in-memory
    cache reflects the change right away.
    """
    @functools.wraps(f)
    def _synced(self, *args, **kwargs):
        rv = f(self, *args, **kwargs)
        # Unconditional refresh: the wrapped call just mutated the store.
        self._cell_data_sync(force=True)
        return rv
    return _synced
# Sentinel distinguishing "argument not supplied" from an explicit falsy
# cells_config value in CellStateManager.__new__.
_unset = object()
class CellStateManager(base.Base):
    """Tracks the state of this cell and its parent/child neighbors.

    Acts as a factory: instantiating CellStateManager directly returns
    either CellStateManagerFile (when CONF.cells.cells_config is set) or
    CellStateManagerDB.  Subclasses provide _cell_data_sync() against
    their backing store.
    """

    def __new__(cls, cell_state_cls=None, cells_config=_unset):
        # Only dispatch when constructed via the base class; subclasses
        # fall through to the normal construction path.
        if cls is not CellStateManager:
            return super(CellStateManager, cls).__new__(cls)

        if cells_config is _unset:
            cells_config = CONF.cells.cells_config

        if cells_config:
            return CellStateManagerFile(cell_state_cls)

        return CellStateManagerDB(cell_state_cls)

    def __init__(self, cell_state_cls=None):
        super(CellStateManager, self).__init__()
        if not cell_state_cls:
            cell_state_cls = CellState
        self.cell_state_cls = cell_state_cls
        self.my_cell_state = cell_state_cls(CONF.cells.name, is_me=True)
        self.parent_cells = {}
        self.child_cells = {}
        self.last_cell_db_check = datetime.datetime.min
        self.servicegroup_api = servicegroup.API()

        # Tolerate a DB that isn't up yet: retry the initial sync every
        # 30 seconds for up to ~an hour before giving up.
        attempts = 0
        while True:
            try:
                self._cell_data_sync(force=True)
                break
            except db_exc.DBError:
                attempts += 1
                if attempts > 120:
                    raise
                LOG.exception('DB error')
                time.sleep(30)

        # Parse 'name=value' (or 'name=v1;v2;...') capability strings
        # from config into {name: set(values)}.
        my_cell_capabs = {}
        for cap in CONF.cells.capabilities:
            name, value = cap.split('=', 1)
            if ';' in value:
                values = set(value.split(';'))
            else:
                values = set([value])
            my_cell_capabs[name] = values
        self.my_cell_state.update_capabilities(my_cell_capabs)

    def _refresh_cells_from_dict(self, db_cells_dict):
        """Make our cell info map match the db."""
        # Update current cells. Delete ones that disappeared
        for cells_dict in (self.parent_cells, self.child_cells):
            # NOTE(fix): iterate over a snapshot -- deleting from the
            # dict while iterating .items() raises RuntimeError on py3.
            for cell_name, cell_info in list(cells_dict.items()):
                is_parent = cell_info.db_info['is_parent']
                db_dict = db_cells_dict.get(cell_name)
                if db_dict and is_parent == db_dict['is_parent']:
                    cell_info.update_db_info(db_dict)
                else:
                    del cells_dict[cell_name]

        # Add new cells
        for cell_name, db_info in db_cells_dict.items():
            if db_info['is_parent']:
                cells_dict = self.parent_cells
            else:
                cells_dict = self.child_cells
            if cell_name not in cells_dict:
                cells_dict[cell_name] = self.cell_state_cls(cell_name)
            cells_dict[cell_name].update_db_info(db_info)

    def _time_to_sync(self):
        """Is it time to sync the DB against our memory cache?"""
        diff = timeutils.utcnow() - self.last_cell_db_check
        # NOTE(fix): use total_seconds(); the .seconds attribute wraps at
        # one day, which could indefinitely defer a sync after a long gap.
        return diff.total_seconds() >= CONF.cells.db_check_interval

    def _update_our_capacity(self, ctxt=None):
        """Update our capacity in the self.my_cell_state CellState.

        This will add/update 2 entries in our CellState.capacities,
        'ram_free' and 'disk_free'.

        The values of these are both dictionaries with the following
        format:

        {'total_mb': <total_memory_free_in_the_cell>,
         'units_by_mb: <units_dictionary>}

        <units_dictionary> contains the number of units that we can build for
        every distinct memory or disk requirement that we have based on
        instance types.  This number is computed by looking at room available
        on every compute_node.

        Take the following instance_types as an example:

        [{'memory_mb': 1024, 'root_gb': 10, 'ephemeral_gb': 100},
         {'memory_mb': 2048, 'root_gb': 20, 'ephemeral_gb': 200}]

        capacities['ram_free']['units_by_mb'] would contain the following:

        {'1024': <number_of_instances_that_will_fit>,
         '2048': <number_of_instances_that_will_fit>}

        capacities['disk_free']['units_by_mb'] would contain the following:

        {'122880': <number_of_instances_that_will_fit>,
         '225280': <number_of_instances_that_will_fit>}

        Units are in MB, so 122880 = (10 + 100) * 1024.

        NOTE(comstud): Perhaps we should only report a single number
        available per instance_type.
        """
        if not ctxt:
            ctxt = context.get_admin_context()

        reserve_level = CONF.cells.reserve_percent / 100.0

        def _defaultdict_int():
            return collections.defaultdict(int)

        # host -> {free_ram_mb, free_disk_mb, total_ram_mb, total_disk_mb}
        compute_hosts = collections.defaultdict(_defaultdict_int)

        def _get_compute_hosts():
            service_refs = {service.host: service
                            for service in objects.ServiceList.get_by_binary(
                                ctxt, 'nova-compute')}

            compute_nodes = objects.ComputeNodeList.get_all(ctxt)
            for compute in compute_nodes:
                host = compute.host
                service = service_refs.get(host)
                if not service or service['disabled']:
                    continue
                # NOTE: This works because it is only used for computes found
                # in the cell this is run in. It can not be used to check on
                # computes in a child cell from the api cell. If this is run
                # in the api cell objects.ComputeNodeList.get_all() above will
                # return an empty list.
                alive = self.servicegroup_api.service_is_up(service)
                if not alive:
                    continue
                chost = compute_hosts[host]
                chost['free_ram_mb'] += max(0, compute.free_ram_mb)
                chost['free_disk_mb'] += max(0, compute.free_disk_gb) * 1024
                chost['total_ram_mb'] += max(0, compute.memory_mb)
                chost['total_disk_mb'] += max(0, compute.local_gb) * 1024

        _get_compute_hosts()
        if not compute_hosts:
            self.my_cell_state.update_capacities({})
            return

        ram_mb_free_units = {}
        disk_mb_free_units = {}
        total_ram_mb_free = 0
        total_disk_mb_free = 0

        def _free_units(total, free, per_inst):
            # Hold back reserve_percent of the total before dividing.
            if per_inst:
                min_free = total * reserve_level
                free = max(0, free - min_free)
                return int(free / per_inst)
            else:
                return 0

        flavors = objects.FlavorList.get_all(ctxt)
        memory_mb_slots = frozenset(
            [flavor.memory_mb for flavor in flavors])
        disk_mb_slots = frozenset(
            [(flavor.root_gb + flavor.ephemeral_gb) * units.Ki
             for flavor in flavors])

        for compute_values in compute_hosts.values():
            total_ram_mb_free += compute_values['free_ram_mb']
            total_disk_mb_free += compute_values['free_disk_mb']
            for memory_mb_slot in memory_mb_slots:
                ram_mb_free_units.setdefault(str(memory_mb_slot), 0)
                free_units = _free_units(compute_values['total_ram_mb'],
                        compute_values['free_ram_mb'], memory_mb_slot)
                ram_mb_free_units[str(memory_mb_slot)] += free_units
            for disk_mb_slot in disk_mb_slots:
                disk_mb_free_units.setdefault(str(disk_mb_slot), 0)
                free_units = _free_units(compute_values['total_disk_mb'],
                        compute_values['free_disk_mb'], disk_mb_slot)
                disk_mb_free_units[str(disk_mb_slot)] += free_units

        capacities = {'ram_free': {'total_mb': total_ram_mb_free,
                                   'units_by_mb': ram_mb_free_units},
                      'disk_free': {'total_mb': total_disk_mb_free,
                                    'units_by_mb': disk_mb_free_units}}
        self.my_cell_state.update_capacities(capacities)

    @sync_before
    def get_cell_info_for_neighbors(self):
        """Return cell information for all neighbor cells."""
        cell_list = [cell.get_cell_info()
                     for cell in six.itervalues(self.child_cells)]
        cell_list.extend([cell.get_cell_info()
                          for cell in six.itervalues(self.parent_cells)])
        return cell_list

    @sync_before
    def get_my_state(self):
        """Return information for my (this) cell."""
        return self.my_cell_state

    @sync_before
    def get_child_cells(self):
        """Return list of child cell_infos."""
        return list(self.child_cells.values())

    @sync_before
    def get_parent_cells(self):
        """Return list of parent cell_infos."""
        return list(self.parent_cells.values())

    @sync_before
    def get_parent_cell(self, cell_name):
        """Return the named parent CellState, or None."""
        return self.parent_cells.get(cell_name)

    @sync_before
    def get_child_cell(self, cell_name):
        """Return the named child CellState, or None."""
        return self.child_cells.get(cell_name)

    @sync_before
    def update_cell_capabilities(self, cell_name, capabilities):
        """Update capabilities for a cell."""
        cell = (self.child_cells.get(cell_name) or
                self.parent_cells.get(cell_name))
        if not cell:
            LOG.error("Unknown cell '%(cell_name)s' when trying to "
                      "update capabilities",
                      {'cell_name': cell_name})
            return
        # Make sure capabilities are sets.
        for capab_name, values in capabilities.items():
            capabilities[capab_name] = set(values)
        cell.update_capabilities(capabilities)

    @sync_before
    def update_cell_capacities(self, cell_name, capacities):
        """Update capacities for a cell."""
        cell = (self.child_cells.get(cell_name) or
                self.parent_cells.get(cell_name))
        if not cell:
            LOG.error("Unknown cell '%(cell_name)s' when trying to "
                      "update capacities",
                      {'cell_name': cell_name})
            return
        cell.update_capacities(capacities)

    @sync_before
    def get_our_capabilities(self, include_children=True):
        """Return our capabilities, optionally merged with those of any
        non-mute child cells.
        """
        capabs = copy.deepcopy(self.my_cell_state.capabilities)
        if include_children:
            for cell in self.child_cells.values():
                # Skip children we haven't heard from recently.
                if timeutils.is_older_than(cell.last_seen,
                                           CONF.cells.mute_child_interval):
                    continue
                for capab_name, values in cell.capabilities.items():
                    if capab_name not in capabs:
                        capabs[capab_name] = set([])
                    capabs[capab_name] |= values
        return capabs

    def _add_to_dict(self, target, src):
        """Recursively sum the numeric leaves of *src* into *target*."""
        for key, value in src.items():
            if isinstance(value, dict):
                target.setdefault(key, {})
                self._add_to_dict(target[key], value)
                continue
            target.setdefault(key, 0)
            target[key] += value

    @sync_before
    def get_our_capacities(self, include_children=True):
        """Return our capacities, optionally summed with children's."""
        capacities = copy.deepcopy(self.my_cell_state.capacities)
        if include_children:
            for cell in self.child_cells.values():
                self._add_to_dict(capacities, cell.capacities)
        return capacities

    @sync_before
    def get_capacities(self, cell_name=None):
        """Return capacities for a named child cell, or ours (plus
        children's) when no name / our own name is given.

        :raises: exception.CellNotFound for an unknown cell_name
        """
        if not cell_name or cell_name == self.my_cell_state.name:
            return self.get_our_capacities()
        if cell_name in self.child_cells:
            return self.child_cells[cell_name].capacities
        raise exception.CellNotFound(cell_name=cell_name)

    @sync_before
    def cell_get(self, ctxt, cell_name):
        """Return the named parent or child CellState.

        :raises: exception.CellNotFound for an unknown cell_name
        """
        for cells_dict in (self.parent_cells, self.child_cells):
            if cell_name in cells_dict:
                return cells_dict[cell_name]

        raise exception.CellNotFound(cell_name=cell_name)
class CellStateManagerDB(CellStateManager):
    """CellStateManager backed by the cells table in the database."""

    # Serialize syncs across greenthreads; the refresh mutates shared
    # parent/child cell dicts.
    @utils.synchronized('cell-db-sync')
    def _cell_data_sync(self, force=False):
        """Update cell status for all cells from the backing data store
        when necessary.

        :param force: If True, cell status will be updated regardless
                      of whether it's time to do so.
        """
        if force or self._time_to_sync():
            LOG.debug("Updating cell cache from db.")
            self.last_cell_db_check = timeutils.utcnow()
            ctxt = context.get_admin_context()
            db_cells = self.db.cell_get_all(ctxt)
            db_cells_dict = {cell['name']: cell for cell in db_cells}
            self._refresh_cells_from_dict(db_cells_dict)
            self._update_our_capacity(ctxt)

    # The mutators below force an immediate re-sync (sync_after) so the
    # in-memory cell map matches the DB right away.
    @sync_after
    def cell_create(self, ctxt, values):
        """Create a cell row and return it."""
        return self.db.cell_create(ctxt, values)

    @sync_after
    def cell_update(self, ctxt, cell_name, values):
        """Update a cell row and return it."""
        return self.db.cell_update(ctxt, cell_name, values)

    @sync_after
    def cell_delete(self, ctxt, cell_name):
        """Delete the named cell row."""
        return self.db.cell_delete(ctxt, cell_name)
class CellStateManagerFile(CellStateManager):
    """CellStateManager backed by a JSON file (CONF.cells.cells_config).

    The file is read-only from nova's point of view: the cell_create/
    cell_update/cell_delete operations are unsupported.
    """

    def __init__(self, cell_state_cls=None):
        cells_config = CONF.cells.cells_config
        self.cells_config_path = CONF.find_file(cells_config)
        if not self.cells_config_path:
            raise cfg.ConfigFilesNotFoundError(config_files=[cells_config])
        super(CellStateManagerFile, self).__init__(cell_state_cls)

    def _cell_data_sync(self, force=False):
        """Update cell status for all cells from the backing data store
        when necessary.

        :param force: If True, cell status will be updated regardless
                      of whether it's time to do so.
        """
        # read_cached_file only re-reads when the file mtime changed (or
        # force_reload is set), so reloaded is False on most calls.
        reloaded, data = utils.read_cached_file(self.cells_config_path,
                                                force_reload=force)
        if reloaded:
            LOG.debug("Updating cell cache from config file.")
            self.cells_config_data = jsonutils.loads(data)
            self._refresh_cells_from_dict(self.cells_config_data)
        if force or self._time_to_sync():
            self.last_cell_db_check = timeutils.utcnow()
            self._update_our_capacity()

    def cell_create(self, ctxt, values):
        """Unsupported for file-backed cells."""
        raise exception.CellsUpdateUnsupported()

    def cell_update(self, ctxt, cell_name, values):
        """Unsupported for file-backed cells."""
        raise exception.CellsUpdateUnsupported()

    def cell_delete(self, ctxt, cell_name):
        """Unsupported for file-backed cells."""
        raise exception.CellsUpdateUnsupported()
-228
View File
@@ -1,228 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Utility Methods
"""
import random
import sys
import six
import nova.conf
from nova import objects
from nova.objects import base as obj_base
# Separator used between cell names for the 'full cell name' and routing
# path
PATH_CELL_SEP = '!'
# Separator used between cell name and item
CELL_ITEM_SEP = '@'
CONF = nova.conf.CONF
class ProxyObjectSerializer(obj_base.NovaObjectSerializer):
    """Serializer that restores _CellProxy wrappers on deserialization.

    Objects serialized via a _CellProxy carry extra 'cell_proxy.*' keys;
    _CellProxy.obj_from_primitive uses them to re-wrap the object.
    """

    def __init__(self):
        super(ProxyObjectSerializer, self).__init__()
        # Handle to the parent serializer so the proxy class can delegate
        # the actual object deserialization back to it.
        self.serializer = super(ProxyObjectSerializer, self)

    def _process_object(self, context, objprim):
        return _CellProxy.obj_from_primitive(self.serializer, objprim, context)
class _CellProxy(object):
    """Wrapper that rewrites an object's 'id' and 'host' as 'cell@value'.

    Everything else is delegated to the wrapped object, with some
    dict-style access sugar for legacy callers.
    """

    def __init__(self, obj, cell_path):
        self._obj = obj
        self._cell_path = cell_path

    @property
    def id(self):
        # Report '<cell_path>@<id>' instead of the bare id.
        return cell_with_item(self._cell_path, self._obj.id)

    @property
    def host(self):
        # Report '<cell_path>@<host>' instead of the bare host.
        return cell_with_item(self._cell_path, self._obj.host)

    def __getitem__(self, key):
        # Dict-style access; 'id'/'host' go through the cell-qualified
        # properties above, anything else hits the wrapped object.
        if key == 'id':
            return self.id

        if key == 'host':
            return self.host

        return getattr(self._obj, key)

    def __contains__(self, key):
        """Pass-through "in" check to the wrapped object.

        This is needed to proxy any types of checks in the calling code
        like::

            if 'availability_zone' in service:
                ...

        :param key: They key to look for in the wrapped object.
        :returns: True if key is in the wrapped object, False otherwise.
        """
        return key in self._obj

    def obj_to_primitive(self):
        # Stash the proxy class and cell path alongside the serialized
        # object so obj_from_primitive can rebuild the wrapper.
        obj_p = self._obj.obj_to_primitive()
        obj_p['cell_proxy.class_name'] = self.__class__.__name__
        obj_p['cell_proxy.cell_path'] = self._cell_path
        return obj_p

    @classmethod
    def obj_from_primitive(cls, serializer, primitive, context=None):
        # Remove the proxy markers (if any) before handing the primitive
        # to the real serializer, then re-wrap when both were present.
        obj_primitive = primitive.copy()
        cell_path = obj_primitive.pop('cell_proxy.cell_path', None)
        klass_name = obj_primitive.pop('cell_proxy.class_name', None)
        obj = serializer._process_object(context, obj_primitive)
        if klass_name is not None and cell_path is not None:
            # Look the proxy class up by name in this module.
            klass = getattr(sys.modules[__name__], klass_name)
            return klass(obj, cell_path)
        else:
            return obj

    # dict-ish syntax sugar
    def _iteritems(self):
        """For backwards-compatibility with dict-based objects.

        NOTE(sbauza): May be removed in the future.
        """
        for name in self._obj.obj_fields:
            if (self._obj.obj_attr_is_set(name) or
                    name in self._obj.obj_extra_fields):
                # 'id'/'host' are yielded cell-qualified, like the
                # properties above.
                if name == 'id':
                    yield name, self.id
                elif name == 'host':
                    yield name, self.host
                else:
                    yield name, getattr(self._obj, name)

    # Expose the generator under the dict-iteration name appropriate to
    # the running Python version.
    if six.PY2:
        iteritems = _iteritems
    else:
        items = _iteritems

    def __getattr__(self, key):
        # Fallback for everything not defined on the proxy itself.
        return getattr(self._obj, key)
class ComputeNodeProxy(_CellProxy):
    """_CellProxy for ComputeNode objects; no extra behavior needed."""
    pass
class ServiceProxy(_CellProxy):
    """_CellProxy for Service objects that hides 'compute_node'."""

    def __getattr__(self, key):
        if key == 'compute_node':
            # NOTE(sbauza): As the Service object is still having a nested
            # ComputeNode object that consumers of this Proxy don't use, we can
            # safely remove it from what's returned
            raise AttributeError
        # NOTE(claudiub): needed for py34 compatibility.
        # get self._obj first, without ending into an infinite recursion.
        return getattr(self.__getattribute__("_obj"), key)
def get_instances_to_sync(context, updated_since=None, project_id=None,
                          deleted=True, shuffle=False, uuids_only=False):
    """Yield instances (or just their UUIDs) to sync with parent cells.

    Results are fetched in pages of
    CONF.cells.instance_update_sync_database_limit, sorted so active
    instances come before deleted ones.  Each page may optionally be
    shuffled so multiple cells services aren't self-healing the same
    instances in near lockstep.

    :param context: request context for the DB queries
    :param updated_since: only include instances changed since this time
    :param project_id: restrict to one project when given
    :param deleted: include deleted instances when True
    :param shuffle: randomize ordering within each page
    :param uuids_only: yield bare UUID strings instead of objects
    """
    def _next_page(marker):
        # Fetch one page and advance the marker to its last row.
        page = objects.InstanceList.get_by_filters(
            context, filters, sort_key='deleted', sort_dir='asc',
            limit=limit, marker=marker)
        if len(page) > 0:
            marker = page[-1]['uuid']
        # Need a real list so random.shuffle can assign in place.
        page = list(page)
        if shuffle:
            random.shuffle(page)
        return page, marker

    filters = {}
    if updated_since is not None:
        filters['changes-since'] = updated_since
    if project_id is not None:
        filters['project_id'] = project_id
    if not deleted:
        filters['deleted'] = False

    limit = CONF.cells.instance_update_sync_database_limit
    marker = None
    while True:
        batch, marker = _next_page(marker)
        if not batch:
            break
        for instance in batch:
            yield instance.uuid if uuids_only else instance
def cell_with_item(cell_name, item):
    """Turn cell_name and item into '<cell_name>@<item>'.

    A None cell_name means "no cell": the item is returned unqualified.
    """
    if cell_name is None:
        return item
    return CELL_ITEM_SEP.join((cell_name, str(item)))
def split_cell_and_item(cell_and_item):
    """Split a combined 'cell@item' into its (cell, item) pieces.

    When no separator is present, the cell part is None.
    """
    pieces = cell_and_item.rsplit(CELL_ITEM_SEP, 1)
    if len(pieces) == 2:
        return pieces
    return (None, cell_and_item)
def add_cell_to_compute_node(compute_node, cell_name):
    """Fix compute_node attributes that should be unique.  Allows
    API cell to query the 'id' by cell@id.
    """
    # Wrap the ComputeNode object in a proxy carrying the cell path so
    # its 'id' is reported cell-qualified.
    return ComputeNodeProxy(compute_node, cell_name)
def add_cell_to_service(service, cell_name):
    """Fix service attributes that should be unique.  Allows
    API cell to query the 'id' or 'host' by cell@id/host.
    """
    # Wrap the Service object in a proxy carrying the cell path so its
    # 'id' and 'host' are reported cell-qualified.
    return ServiceProxy(service, cell_name)
def add_cell_to_task_log(task_log, cell_name):
    """Fix task_log attributes that should be unique.  In particular,
    the 'id' and 'host' fields should be prepended with cell name.
    """
    for field in ('id', 'host'):
        task_log[field] = cell_with_item(cell_name, task_log[field])
-43
View File
@@ -1,43 +0,0 @@
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell Scheduler weights
"""
from nova import weights
class WeightedCell(weights.WeighedObject):
    """WeighedObject whose repr shows the wrapped cell and its weight."""

    def __repr__(self):
        cell, weight = self.obj.name, self.weight
        return "WeightedCell [cell: %s, weight: %s]" % (cell, weight)
class BaseCellWeigher(weights.BaseWeigher):
    """Base class for cell weights."""
    # Marker base class: concrete weighers implement _weigh_object and
    # weight_multiplier from weights.BaseWeigher.
    pass
class CellWeightHandler(weights.BaseWeightHandler):
    """Weight handler that loads and applies BaseCellWeigher plugins."""
    # Weighed results are wrapped in WeightedCell for readable reprs.
    object_class = WeightedCell

    def __init__(self):
        super(CellWeightHandler, self).__init__(BaseCellWeigher)
def all_weighers():
    """Return a list of weight plugin classes found in this directory."""
    handler = CellWeightHandler()
    return handler.get_all_classes()
-59
View File
@@ -1,59 +0,0 @@
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
If a child cell hasn't sent capacity or capability updates in a while,
downgrade its likelihood of being chosen for scheduling requests.
"""
from oslo_log import log as logging
from oslo_utils import timeutils
from nova.cells import weights
import nova.conf
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class MuteChildWeigher(weights.BaseCellWeigher):
    """If a child cell hasn't been heard from, greatly lower its selection
    weight.
    """

    # Raw weight for a mute cell; combined with the (negative)
    # mute_weight_multiplier below this pushes the cell to the bottom.
    MUTE_WEIGH_VALUE = 1.0

    def weight_multiplier(self, host_state):
        # negative multiplier => lower weight
        return CONF.cells.mute_weight_multiplier

    def _weigh_object(self, cell, weight_properties):
        """Weigh the cell by recency of its last capability/capacity
        update: 0 when heard from within mute_child_interval seconds,
        MUTE_WEIGH_VALUE otherwise.
        """
        cutoff = CONF.cells.mute_child_interval
        if not timeutils.is_older_than(cell.last_seen, cutoff):
            return 0
        # yep, that's a mute child; recommend highly that it be skipped!
        LOG.warning("%(cell)s has not been seen since %(last_seen)s "
                    "and is being treated as mute.",
                    {'cell': cell, 'last_seen': cell.last_seen})
        return self.MUTE_WEIGH_VALUE
@@ -1,46 +0,0 @@
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Weigh cells by memory needed in a way that spreads instances.
"""
from nova.cells import weights
import nova.conf
CONF = nova.conf.CONF
class RamByInstanceTypeWeigher(weights.BaseCellWeigher):
    """Weigh cells by instance_type requested."""

    def weight_multiplier(self, host_state):
        return CONF.cells.ram_weight_multiplier

    def _weigh_object(self, cell, weight_properties):
        """Weigh the cell by how many units of the requested
        instance_type's memory size its advertised 'ram_free' capacity
        reports.  Higher weights win, so builds are directed toward
        cells with more room; simply return the unit count.
        """
        flavor = weight_properties['request_spec']['instance_type']
        units_by_mb = cell.capacities.get('ram_free', {}).get(
            'units_by_mb', {})
        return units_by_mb.get(str(flavor['memory_mb']), 0)
-39
View File
@@ -1,39 +0,0 @@
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Weigh cells by their weight_offset in the DB. Cells with higher
weight_offsets in the DB will be preferred.
"""
from nova.cells import weights
import nova.conf
CONF = nova.conf.CONF
class WeightOffsetWeigher(weights.BaseCellWeigher):
    """Weight cell by weight_offset db field.

    Originally designed so you can set a default cell by putting
    its weight_offset to 999999999999999 (highest weight wins)
    """

    def weight_multiplier(self, host_state):
        return CONF.cells.offset_weight_multiplier

    def _weigh_object(self, cell, weight_properties):
        """Returns whatever was in the DB for weight_offset."""
        offset = cell.db_info.get('weight_offset', 0)
        return offset
+3 -12
View File
@@ -16,22 +16,13 @@
from oslo_utils import importutils
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from nova.compute import <foo>' elsewhere.
import nova.cells.opts
import nova.exception
CELL_TYPE_TO_CLS_NAME = {'api': 'nova.compute.cells_api.ComputeCellsAPI',
'compute': 'nova.compute.api.API',
None: 'nova.compute.api.API',
}
# TODO(stephenfin): Remove this nonsense
CELL_TYPE_TO_CLS_NAME = {None: 'nova.compute.api.API'}
def _get_compute_api_class_name():
"""Returns the name of compute API class."""
cell_type = nova.cells.opts.get_cell_type()
return CELL_TYPE_TO_CLS_NAME[cell_type]
return CELL_TYPE_TO_CLS_NAME[None]
def API(*args, **kwargs):
+36 -177
View File
@@ -39,7 +39,6 @@ from six.moves import range
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import instance_list
@@ -207,14 +206,6 @@ def check_instance_lock(function):
return inner
def check_instance_cell(fn):
@six.wraps(fn)
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance)
return fn(self, context, instance, *args, **kwargs)
return _wrapped
def _diff_dict(orig, new):
"""Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
@@ -280,18 +271,6 @@ class API(base.Base):
self.host = CONF.host
super(API, self).__init__(**kwargs)
@property
def cell_type(self):
return getattr(self, '_cell_type', cells_opts.get_cell_type())
def _validate_cell(self, instance):
if self.cell_type != 'api':
return
cell_name = instance.cell_name
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance.uuid)
def _record_action_start(self, context, instance, action):
objects.InstanceAction.action_start(context, instance.uuid,
action, want_result=False)
@@ -1329,50 +1308,16 @@ class API(base.Base):
instances.append(instance)
request_specs.append(rs)
if CONF.cells.enable:
# NOTE(danms): CellsV1 can't do the new thing, so we
# do the old thing here. We can remove this path once
# we stop supporting v1.
for instance in instances:
instance.create()
# NOTE(melwitt): We recheck the quota after creating the objects
# to prevent users from allocating more resources than their
# allowed quota in the event of a race. This is configurable
# because it can be expensive if strict quota limits are not
# required in a deployment.
if CONF.quota.recheck_quota:
try:
compute_utils.check_num_instances_quota(
context, instance_type, 0, 0,
orig_num_req=len(instances))
except exception.TooManyInstances:
with excutils.save_and_reraise_exception():
# Need to clean up all the instances we created
# along with the build requests, request specs,
# and instance mappings.
self._cleanup_build_artifacts(instances,
instances_to_build)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=False)
else:
self.compute_task_api.schedule_and_build_instances(
context,
build_requests=build_requests,
request_spec=request_specs,
image=boot_meta,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
block_device_mapping=block_device_mapping,
tags=tags)
self.compute_task_api.schedule_and_build_instances(
context,
build_requests=build_requests,
request_spec=request_specs,
image=boot_meta,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
block_device_mapping=block_device_mapping,
tags=tags)
return instances, reservation_id
@@ -1907,16 +1852,13 @@ class API(base.Base):
# guaranteed everyone is using cellsv2.
pass
if (inst_map is None or inst_map.cell_mapping is None or
CONF.cells.enable):
if inst_map is None or inst_map.cell_mapping is None:
# If inst_map is None then the deployment has not migrated to
# cellsv2 yet.
# If inst_map.cell_mapping is None then the instance is not in a
# cell yet. Until instance creation moves to the conductor the
# instance can be found in the configured database, so attempt
# to look it up.
# If we're on cellsv1, we can't yet short-circuit the cells
# messaging path
cell = None
try:
instance = objects.Instance.get_by_uuid(context, uuid)
@@ -2089,12 +2031,6 @@ class API(base.Base):
self.consoleauth_rpcapi.delete_tokens_for_instance(
context, instance.uuid)
if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to
# skip all remaining logic and just call the callback,
# which will cause a cast to the child cell.
cb(context, instance, bdms)
return
if not instance.host and not may_have_ports_or_volumes:
try:
with compute_utils.notify_about_instance_delete(
@@ -2271,27 +2207,26 @@ class API(base.Base):
delete_type if delete_type != 'soft_delete' else 'delete'):
elevated = context.elevated()
if self.cell_type != 'api':
# NOTE(liusheng): In nova-network multi_host scenario,deleting
# network info of the instance may need instance['host'] as
# destination host of RPC call. If instance in
# SHELVED_OFFLOADED state, instance['host'] is None, here, use
# shelved_host as host to deallocate network info and reset
# instance['host'] after that. Here we shouldn't use
# instance.save(), because this will mislead user who may think
# the instance's host has been changed, and actually, the
# instance.host is always None.
orig_host = instance.host
try:
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
sysmeta = getattr(instance,
obj_base.get_attrname(
'system_metadata'))
instance.host = sysmeta.get('shelved_host')
self.network_api.deallocate_for_instance(elevated,
instance)
finally:
instance.host = orig_host
# NOTE(liusheng): In nova-network multi_host scenario,deleting
# network info of the instance may need instance['host'] as
# destination host of RPC call. If instance in
# SHELVED_OFFLOADED state, instance['host'] is None, here, use
# shelved_host as host to deallocate network info and reset
# instance['host'] after that. Here we shouldn't use
# instance.save(), because this will mislead user who may think
# the instance's host has been changed, and actually, the
# instance.host is always None.
orig_host = instance.host
try:
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
sysmeta = getattr(instance,
obj_base.get_attrname(
'system_metadata'))
instance.host = sysmeta.get('shelved_host')
self.network_api.deallocate_for_instance(elevated,
instance)
finally:
instance.host = orig_host
# cleanup volumes
self._local_cleanup_bdm_volumes(bdms, instance, context)
@@ -2350,7 +2285,6 @@ class API(base.Base):
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
@@ -2367,7 +2301,6 @@ class API(base.Base):
task_state=task_states.DELETING)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
@@ -2425,7 +2358,6 @@ class API(base.Base):
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR])
def stop(self, context, instance, do_cast=True, clean_shutdown=True):
"""Stop an instance."""
@@ -2433,7 +2365,6 @@ class API(base.Base):
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
@@ -2450,7 +2381,6 @@ class API(base.Base):
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=vm_states.ALLOW_TRIGGER_CRASH_DUMP)
def trigger_crash_dump(self, context, instance):
"""Trigger crash dump in an instance."""
@@ -2577,11 +2507,6 @@ class API(base.Base):
def _get_instance(self, context, instance_uuid, expected_attrs,
cell_down_support=False):
# If we're on cellsv1, we need to consult the top-level
# merged replica instead of the cell directly.
if CONF.cells.enable:
return objects.Instance.get_by_uuid(context, instance_uuid,
expected_attrs=expected_attrs)
inst_map = self._get_instance_map_or_none(context, instance_uuid)
if inst_map and (inst_map.cell_mapping is not None):
instance = self._get_instance_from_cell(context, inst_map,
@@ -2956,7 +2881,6 @@ class API(base.Base):
# NOTE(melwitt): We don't check instance lock for backup because lock is
# intended to prevent accidental change/delete of instances
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def backup(self, context, instance, name, backup_type, rotation,
@@ -3001,7 +2925,6 @@ class API(base.Base):
# NOTE(melwitt): We don't check instance lock for snapshot because lock is
# intended to prevent accidental change/delete of instances
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
@@ -3236,7 +3159,6 @@ class API(base.Base):
reboot_type='HARD')
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rebuild(self, context, instance, image_href, admin_password,
@@ -3461,7 +3383,6 @@ class API(base.Base):
allowed=total_alloweds)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
@@ -3514,7 +3435,6 @@ class API(base.Base):
migration.dest_compute)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
@@ -3539,28 +3459,7 @@ class API(base.Base):
migration,
migration.source_compute)
@staticmethod
def _resize_cells_support(context, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts.
# We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = objects.Migration(context=context.elevated())
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.migration_type = (
mig.old_instance_type_id != mig.new_instance_type_id and
'resize' or 'migration')
mig.create()
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
host_name=None, **extra_instance_updates):
@@ -3621,7 +3520,7 @@ class API(base.Base):
if not same_instance_type and new_instance_type.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
if same_instance_type and flavor_id and self.cell_type != 'compute':
if same_instance_type and flavor_id:
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
@@ -3665,12 +3564,6 @@ class API(base.Base):
instance.update(extra_instance_updates)
instance.save(expected_task_state=[None])
if self.cell_type == 'api':
# Create migration record.
self._resize_cells_support(context, instance,
current_instance_type,
new_instance_type)
if not flavor_id:
self._record_action_start(context, instance,
instance_actions.MIGRATE)
@@ -3767,7 +3660,6 @@ class API(base.Base):
instance=instance, address=address)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def pause(self, context, instance):
"""Pause the given instance."""
@@ -3777,7 +3669,6 @@ class API(base.Base):
self.compute_rpcapi.pause_instance(context, instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
@@ -3798,7 +3689,6 @@ class API(base.Base):
instance=instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def suspend(self, context, instance):
"""Suspend the given instance."""
@@ -3808,7 +3698,6 @@ class API(base.Base):
self.compute_rpcapi.suspend_instance(context, instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
@@ -3856,7 +3745,6 @@ class API(base.Base):
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance.
@@ -3896,13 +3784,6 @@ class API(base.Base):
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
@@ -3923,13 +3804,6 @@ class API(base.Base):
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
@@ -3950,13 +3824,6 @@ class API(base.Base):
return {'url': connect_info['access_url']}
@check_instance_host
def get_rdp_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
@@ -3977,13 +3844,6 @@ class API(base.Base):
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_serial_console_connect_info(self, context, instance, console_type):
"""Used in a child cell to get serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
@@ -4064,13 +3924,11 @@ class API(base.Base):
source=fields_obj.NotificationSource.API)
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
@@ -4181,6 +4039,8 @@ class API(base.Base):
'create a new style volume attachment.')
self.volume_api.reserve_volume(context, volume_id)
# TODO(stephenfin): Fold this back in now that cells v1 no longer needs to
# override it.
def _attach_volume(self, context, instance, volume, device,
disk_bus, device_type, tag=None,
supports_multiattach=False):
@@ -4318,6 +4178,8 @@ class API(base.Base):
disk_bus, device_type, tag=tag,
supports_multiattach=supports_multiattach)
# TODO(stephenfin): Fold this back in now that cells v1 no longer needs to
# override it.
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance.
@@ -4520,7 +4382,6 @@ class API(base.Base):
return _metadata
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name, force=None, async_=False):
@@ -4584,7 +4445,6 @@ class API(base.Base):
messaging_timeout)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE],
task_state=[task_states.MIGRATING])
def live_migrate_force_complete(self, context, instance, migration_id):
@@ -4616,7 +4476,6 @@ class API(base.Base):
context, instance, migration)
@check_instance_lock
@check_instance_cell
@check_instance_state(task_state=[task_states.MIGRATING])
def live_migrate_abort(self, context, instance, migration_id,
support_abort_in_queue=False):
-683
View File
@@ -1,683 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute API that proxies via Cells Service.
This relates to cells v1. This layer is basically responsible for intercepting
compute/api calls at the top layer and forwarding to the child cell to be
replayed there.
"""
import oslo_messaging as messaging
from oslo_utils import excutils
from nova import availability_zones
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import utils as cells_utils
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
check_instance_state = compute_api.check_instance_state
reject_instance_state = compute_api.reject_instance_state
check_instance_lock = compute_api.check_instance_lock
check_instance_cell = compute_api.check_instance_cell
class ComputeRPCAPIRedirect(object):
    """Redirect compute RPC API calls to the cells RPC API.

    Methods named in ``cells_compatible`` share a signature with their
    cells_rpcapi counterparts and are forwarded there; every other
    compute RPC method silently becomes a no-op returning ``None``.
    """

    # NOTE(comstud): These are a list of methods where the cells_rpcapi
    # and the compute_rpcapi methods have the same signatures. This
    # is for transitioning to a common interface where we can just
    # swap out the compute_rpcapi class with the cells_rpcapi class.
    cells_compatible = ['start_instance', 'stop_instance',
                        'reboot_instance', 'suspend_instance',
                        'resume_instance', 'terminate_instance',
                        'soft_delete_instance', 'pause_instance',
                        'unpause_instance', 'revert_resize',
                        'confirm_resize', 'reset_network',
                        'inject_network_info',
                        'backup_instance', 'snapshot_instance',
                        'set_admin_password']

    def __init__(self, cells_rpcapi):
        self.cells_rpcapi = cells_rpcapi

    def __getattr__(self, key):
        if key not in self.cells_compatible:
            # Unsupported compute RPC methods are swallowed silently.
            def _noop_rpc_wrapper(*args, **kwargs):
                return None
            return _noop_rpc_wrapper
        return getattr(self.cells_rpcapi, key)
class ConductorTaskRPCAPIRedirect(object):
    """Redirect conductor task RPC API calls to the cells RPC API.

    Methods named in ``cells_compatible`` are forwarded to the
    cells_rpcapi object; anything else becomes a no-op returning
    ``None``.
    """

    # NOTE(comstud): These are a list of methods where the cells_rpcapi
    # and the compute_task_rpcapi methods have the same signatures. This
    # is for transitioning to a common interface where we can just
    # swap out the compute_task_rpcapi class with the cells_rpcapi class.
    cells_compatible = ['build_instances', 'resize_instance',
                        'live_migrate_instance', 'rebuild_instance']

    def __init__(self, cells_rpcapi_obj):
        self.cells_rpcapi = cells_rpcapi_obj

    def __getattr__(self, key):
        if key not in self.cells_compatible:
            # Unsupported conductor task methods are swallowed silently.
            def _noop_rpc_wrapper(*args, **kwargs):
                return None
            return _noop_rpc_wrapper
        return getattr(self.cells_rpcapi, key)
class RPCClientCellsProxy(object):
    """Stand-in for an oslo.messaging RPC client.

    Instead of sending messages directly, casts and calls are wrapped
    into a message dict and routed through the cells service via
    ``proxy_rpc_to_manager``.
    """

    def __init__(self, target, version_cap):
        super(RPCClientCellsProxy, self).__init__()
        self.target = target
        self.version_cap = version_cap
        self._server = None
        self._version = None
        self.cells_rpcapi = cells_rpcapi.CellsAPI()

    def prepare(self, **kwargs):
        # Mirror oslo.messaging's prepare(): return a copy of this proxy
        # optionally pinned to a specific server and/or version.
        proxy = type(self)(self.target, self.version_cap)
        proxy.cells_rpcapi = self.cells_rpcapi
        server = kwargs.pop('server', None)
        version = kwargs.pop('version', None)
        if kwargs:
            raise ValueError(_("Unsupported kwargs: %s") % kwargs.keys())
        if server:
            proxy._server = server
        if version:
            proxy._version = version
        return proxy

    def _check_version_cap(self, version):
        # Delegate the cap check to a real client built for our target.
        client = rpc.get_client(self.target, version_cap=self.version_cap)
        if not client.can_send_version(version):
            raise messaging.RPCVersionCapError(version=version,
                                               version_cap=self.version_cap)

    def _make_msg(self, method, **kwargs):
        # A pinned version (from prepare()) wins over the target default.
        version = self._version if self._version else self.target.version
        self._check_version_cap(version)
        return {'method': method,
                'namespace': None,
                'version': version,
                'args': kwargs}

    def _get_topic(self):
        if self._server is None:
            return self.target.topic
        return '%s.%s' % (self.target.topic, self._server)

    def can_send_version(self, version):
        client = rpc.get_client(self.target, version_cap=self.version_cap)
        return client.can_send_version(version)

    def cast(self, ctxt, method, **kwargs):
        self.cells_rpcapi.proxy_rpc_to_manager(
            ctxt, self._make_msg(method, **kwargs), self._get_topic())

    def call(self, ctxt, method, **kwargs):
        return self.cells_rpcapi.proxy_rpc_to_manager(
            ctxt, self._make_msg(method, **kwargs), self._get_topic(),
            call=True)
class ComputeRPCProxyAPI(compute_rpcapi.ComputeAPI):
    """Class used to substitute Compute RPC API that will proxy
    via the cells manager to a compute manager in a child cell.
    """
    def get_client(self, target, version_cap, serializer):
        # The serializer argument is deliberately ignored:
        # RPCClientCellsProxy takes none, since the message is marshalled
        # by the cells service via proxy_rpc_to_manager instead.
        return RPCClientCellsProxy(target, version_cap)
class ComputeCellsAPI(compute_api.API):
    """Compute API for the top-level (API) cell in cells v1.

    Overrides compute_api.API so that instance operations are forwarded
    to the child cell owning the instance (via cast/call through the
    cells RPC API) instead of being performed locally.
    """
    def __init__(self, *args, **kwargs):
        super(ComputeCellsAPI, self).__init__(*args, **kwargs)
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
        # Avoid casts/calls directly to compute
        self.compute_rpcapi = ComputeRPCAPIRedirect(self.cells_rpcapi)
        # Redirect conductor build_instances to cells
        self.compute_task_api = ConductorTaskRPCAPIRedirect(self.cells_rpcapi)
        self._cell_type = 'api'
    def _cast_to_cells(self, context, instance, method, *args, **kwargs):
        # Fire-and-forget the named compute API method into the
        # instance's cell; requires instance.cell_name to be set.
        instance_uuid = instance.uuid
        cell_name = instance.cell_name
        if not cell_name:
            raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
        self.cells_rpcapi.cast_compute_api_method(context, cell_name,
                method, instance_uuid, *args, **kwargs)
    def _call_to_cells(self, context, instance, method, *args, **kwargs):
        # Synchronous variant of _cast_to_cells: returns the child
        # cell's result.
        instance_uuid = instance.uuid
        cell_name = instance.cell_name
        if not cell_name:
            raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
        return self.cells_rpcapi.call_compute_api_method(context, cell_name,
                method, instance_uuid, *args, **kwargs)
    def _check_requested_networks(self, context, requested_networks,
                                  max_count):
        """Override compute API's checking of this. It'll happen in
        child cell
        """
        return max_count
    def create(self, *args, **kwargs):
        """We can use the base functionality, but I left this here just
        for completeness.
        """
        return super(ComputeCellsAPI, self).create(*args, **kwargs)
    def _create_block_device_mapping(self, *args, **kwargs):
        """Don't create block device mappings in the API cell.
        The child cell will create it and propagate it up to the parent cell.
        """
        pass
    def force_delete(self, context, instance):
        self._handle_cell_delete(context, instance, 'force_delete')
    def soft_delete(self, context, instance):
        self._handle_cell_delete(context, instance, 'soft_delete')
    def delete(self, context, instance):
        self._handle_cell_delete(context, instance, 'delete')
    def _handle_cell_delete(self, context, instance, method_name):
        # When no cell owns the instance yet, broadcast the delete and
        # clean up locally; otherwise delegate to the parent class
        # method, whose RPC traffic is already cells-redirected.
        if not instance.cell_name:
            delete_type = method_name == 'soft_delete' and 'soft' or 'hard'
            self.cells_rpcapi.instance_delete_everywhere(context,
                    instance, delete_type)
            # NOTE(danms): If we try to delete an instance with no cell,
            # there isn't anything to salvage, so we can hard-delete here.
            try:
                if self._delete_while_booting(context, instance):
                    return
            except exception.ObjectActionError:
                # NOTE(alaski): We very likely got here because the host
                # constraint in instance.destroy() failed. This likely means
                # that an update came up from a child cell and cell_name is
                # set now. We handle this similarly to how the
                # ObjectActionError is handled below.
                with excutils.save_and_reraise_exception() as exc:
                    _cell, instance = self._lookup_instance(context,
                                                            instance.uuid)
                    if instance is None:
                        exc.reraise = False
                    elif instance.cell_name:
                        exc.reraise = False
                        self._handle_cell_delete(context, instance,
                                                 method_name)
                return
            # If instance.cell_name was not set it's possible that the Instance
            # object here was pulled from a BuildRequest object and is not
            # fully populated. Notably it will be missing an 'id' field which
            # will prevent instance.destroy from functioning properly. A
            # lookup is attempted which will either return a full Instance or
            # None if not found. If not found then it's acceptable to skip the
            # rest of the delete processing.
            _cell, instance = self._lookup_instance(context, instance.uuid)
            if instance is None:
                # Instance has been deleted out from under us
                return
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
            try:
                super(ComputeCellsAPI, self)._local_delete(context, instance,
                                                           bdms, method_name,
                                                           self._do_delete)
            except exception.ObjectActionError:
                # NOTE(alaski): We very likely got here because the host
                # constraint in instance.destroy() failed. This likely means
                # that an update came up from a child cell and cell_name is
                # set now. If so try the delete again.
                with excutils.save_and_reraise_exception() as exc:
                    instance.refresh()
                    if instance.cell_name:
                        exc.reraise = False
                        self._handle_cell_delete(context, instance,
                                                 method_name)
            return
        method = getattr(super(ComputeCellsAPI, self), method_name)
        method(context, instance)
    @check_instance_cell
    def restore(self, context, instance):
        """Restore a previously deleted (but not reclaimed) instance."""
        super(ComputeCellsAPI, self).restore(context, instance)
        self._cast_to_cells(context, instance, 'restore')
    @check_instance_cell
    def evacuate(self, context, instance, host, *args, **kwargs):
        """Evacuate the given instance with the provided attributes."""
        if host:
            # Strip any 'path!to!cell' prefix from the target host.
            cell_path, host = cells_utils.split_cell_and_item(host)
        self._cast_to_cells(context, instance, 'evacuate',
                host, *args, **kwargs)
    @check_instance_cell
    def add_fixed_ip(self, context, instance, *args, **kwargs):
        """Add fixed_ip from specified network to given instance."""
        super(ComputeCellsAPI, self).add_fixed_ip(context, instance,
                *args, **kwargs)
        self._cast_to_cells(context, instance, 'add_fixed_ip',
                *args, **kwargs)
    @check_instance_cell
    def remove_fixed_ip(self, context, instance, *args, **kwargs):
        """Remove fixed_ip from specified network to given instance."""
        super(ComputeCellsAPI, self).remove_fixed_ip(context, instance,
                *args, **kwargs)
        self._cast_to_cells(context, instance, 'remove_fixed_ip',
                *args, **kwargs)
    def get_diagnostics(self, context, instance):
        """Retrieve diagnostics for the given instance."""
        # FIXME(comstud): Cache this?
        # Also: only calling super() to get state/policy checking
        super(ComputeCellsAPI, self).get_diagnostics(context, instance)
        return self._call_to_cells(context, instance, 'get_diagnostics')
    def get_instance_diagnostics(self, context, instance):
        """Retrieve diagnostics for the given instance."""
        # FIXME(comstud): Cache this?
        # Also: only calling super() to get state/policy checking
        super(ComputeCellsAPI, self).get_instance_diagnostics(context,
                                                              instance)
        return self._call_to_cells(context, instance,
                                   'get_instance_diagnostics')
    @check_instance_cell
    def rescue(self, context, instance, rescue_password=None,
               rescue_image_ref=None, clean_shutdown=True):
        """Rescue the given instance."""
        super(ComputeCellsAPI, self).rescue(context, instance,
                rescue_password=rescue_password,
                rescue_image_ref=rescue_image_ref,
                clean_shutdown=clean_shutdown)
        self._cast_to_cells(context, instance, 'rescue',
                rescue_password=rescue_password,
                rescue_image_ref=rescue_image_ref,
                clean_shutdown=clean_shutdown)
    @check_instance_cell
    def unrescue(self, context, instance):
        """Unrescue the given instance."""
        super(ComputeCellsAPI, self).unrescue(context, instance)
        self._cast_to_cells(context, instance, 'unrescue')
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.PAUSED, vm_states.SUSPENDED])
    def shelve(self, context, instance, clean_shutdown=True):
        """Shelve the given instance."""
        self._cast_to_cells(context, instance, 'shelve',
                clean_shutdown=clean_shutdown)
    @check_instance_cell
    def shelve_offload(self, context, instance, clean_shutdown=True):
        """Offload the shelved instance."""
        super(ComputeCellsAPI, self).shelve_offload(context, instance,
                clean_shutdown=clean_shutdown)
        self._cast_to_cells(context, instance, 'shelve_offload',
                clean_shutdown=clean_shutdown)
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.SHELVED,
                                    vm_states.SHELVED_OFFLOADED])
    def unshelve(self, context, instance):
        """Unshelve the given instance."""
        self._cast_to_cells(context, instance, 'unshelve')
    @check_instance_cell
    @reject_instance_state(
        task_state=[task_states.DELETING, task_states.MIGRATING])
    def get_vnc_console(self, context, instance, console_type):
        """Get a url to a VNC Console."""
        if not instance.host:
            raise exception.InstanceNotReady(instance_id=instance.uuid)
        connect_info = self._call_to_cells(context, instance,
                'get_vnc_connect_info', console_type)
        self.consoleauth_rpcapi.authorize_console(context,
                connect_info['token'], console_type, connect_info['host'],
                connect_info['port'], connect_info['internal_access_path'],
                instance.uuid, access_url=connect_info['access_url'])
        return {'url': connect_info['access_url']}
    @check_instance_cell
    @reject_instance_state(
        task_state=[task_states.DELETING, task_states.MIGRATING])
    def get_spice_console(self, context, instance, console_type):
        """Get a url to a SPICE Console."""
        if not instance.host:
            raise exception.InstanceNotReady(instance_id=instance.uuid)
        connect_info = self._call_to_cells(context, instance,
                'get_spice_connect_info', console_type)
        self.consoleauth_rpcapi.authorize_console(context,
                connect_info['token'], console_type, connect_info['host'],
                connect_info['port'], connect_info['internal_access_path'],
                instance.uuid, access_url=connect_info['access_url'])
        return {'url': connect_info['access_url']}
    @check_instance_cell
    @reject_instance_state(
        task_state=[task_states.DELETING, task_states.MIGRATING])
    def get_rdp_console(self, context, instance, console_type):
        """Get a url to a RDP Console."""
        if not instance.host:
            raise exception.InstanceNotReady(instance_id=instance.uuid)
        connect_info = self._call_to_cells(context, instance,
                'get_rdp_connect_info', console_type)
        self.consoleauth_rpcapi.authorize_console(context,
                connect_info['token'], console_type, connect_info['host'],
                connect_info['port'], connect_info['internal_access_path'],
                instance.uuid, access_url=connect_info['access_url'])
        return {'url': connect_info['access_url']}
    @check_instance_cell
    @reject_instance_state(
        task_state=[task_states.DELETING, task_states.MIGRATING])
    def get_serial_console(self, context, instance, console_type):
        """Get a url to a serial console."""
        if not instance.host:
            raise exception.InstanceNotReady(instance_id=instance.uuid)
        connect_info = self._call_to_cells(context, instance,
                'get_serial_console_connect_info', console_type)
        self.consoleauth_rpcapi.authorize_console(context,
                connect_info['token'], console_type, connect_info['host'],
                connect_info['port'], connect_info['internal_access_path'],
                instance.uuid, access_url=connect_info['access_url'])
        return {'url': connect_info['access_url']}
    @check_instance_cell
    def get_console_output(self, context, instance, *args, **kwargs):
        """Get console output for an instance."""
        # NOTE(comstud): Calling super() just to get policy check
        super(ComputeCellsAPI, self).get_console_output(context, instance,
                *args, **kwargs)
        return self._call_to_cells(context, instance, 'get_console_output',
                *args, **kwargs)
    @check_instance_cell
    def _attach_volume(self, context, instance, volume, device,
                       disk_bus, device_type, tag=None,
                       supports_multiattach=False):
        """Attach an existing volume to an existing instance."""
        if tag:
            raise exception.VolumeTaggedAttachNotSupported()
        if volume['multiattach']:
            # We don't support multiattach volumes with cells v1.
            raise exception.MultiattachSupportNotYetAvailable()
        self.volume_api.check_availability_zone(context, volume,
                                                instance=instance)
        return self._call_to_cells(context, instance, 'attach_volume',
                volume['id'], device, disk_bus, device_type)
    @check_instance_cell
    def _detach_volume(self, context, instance, volume):
        """Detach a volume from an instance."""
        self._cast_to_cells(context, instance, 'detach_volume',
                volume)
    @check_instance_cell
    def associate_floating_ip(self, context, instance, address):
        """Makes calls to network_api to associate_floating_ip.
        :param address: is a string floating ip address
        """
        self._cast_to_cells(context, instance, 'associate_floating_ip',
                address)
    @check_instance_cell
    def delete_instance_metadata(self, context, instance, key):
        """Delete the given metadata item from an instance."""
        super(ComputeCellsAPI, self).delete_instance_metadata(context,
                instance, key)
        self._cast_to_cells(context, instance, 'delete_instance_metadata',
                key)
    @check_instance_cell
    def update_instance_metadata(self, context, instance,
                                 metadata, delete=False):
        """Update instance metadata locally, then cast to the child cell.

        An unknown-cell error from the cast is deliberately ignored; the
        local update's result is returned regardless.
        """
        rv = super(ComputeCellsAPI, self).update_instance_metadata(context,
                instance, metadata, delete=delete)
        try:
            self._cast_to_cells(context, instance,
                                'update_instance_metadata',
                                metadata, delete=delete)
        except exception.InstanceUnknownCell:
            pass
        return rv
    def get_migrations(self, context, filters):
        # Migrations are looked up through the cells service rather
        # than the local database.
        return self.cells_rpcapi.get_migrations(context, filters)
class HostAPI(compute_api.HostAPI):
"""HostAPI() class for cells.
Implements host management related operations. Works by setting the
RPC API used by the base class to proxy via the cells manager to the
compute manager in the correct cell. Hosts specified with cells will
need to be of the format 'path!to!cell@host'.
DB methods in the base class are also overridden to proxy via the
cells manager.
"""
def __init__(self):
super(HostAPI, self).__init__(rpcapi=ComputeRPCProxyAPI())
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Cannot check this in API cell. This will be checked in the
target child cell.
"""
pass
def set_host_enabled(self, context, host_name, enabled):
try:
result = super(HostAPI, self).set_host_enabled(context, host_name,
enabled)
except exception.CellRoutingInconsistency:
raise exception.HostNotFound(host=host_name)
return result
def host_power_action(self, context, host_name, action):
try:
result = super(HostAPI, self).host_power_action(context, host_name,
action)
except exception.CellRoutingInconsistency:
raise exception.HostNotFound(host=host_name)
return result
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
return self.cells_rpcapi.get_host_uptime(context, host_name)
def service_get_all(self, context, filters=None, set_zones=False,
all_cells=False, cell_down_support=False):
"""Get all services.
Note that this is the cellsv1 variant, which means we ignore the
"all_cells" parameter.
"""
if filters is None:
filters = {}
if 'availability_zone' in filters:
zone_filter = filters.pop('availability_zone')
set_zones = True
else:
zone_filter = None
services = self.cells_rpcapi.service_get_all(context,
filters=filters)
if set_zones:
# TODO(sbauza): set_availability_zones returns flat dicts,
# we should rather modify the RPC API to amend service_get_all by
# adding a set_zones argument
services = availability_zones.set_availability_zones(context,
services)
if zone_filter is not None:
services = [s for s in services
if s['availability_zone'] == zone_filter]
# NOTE(sbauza): As services is a list of flat dicts, we need to
# rehydrate the corresponding ServiceProxy objects
cell_paths = []
for service in services:
cell_path, id = cells_utils.split_cell_and_item(service['id'])
cell_path, host = cells_utils.split_cell_and_item(
service['host'])
service['id'] = id
service['host'] = host
cell_paths.append(cell_path)
services = obj_base.obj_make_list(context,
objects.ServiceList(),
objects.Service,
services)
services = [cells_utils.ServiceProxy(s, c)
for s, c in zip(services, cell_paths)]
return services
def service_get_by_compute_host(self, context, host_name):
try:
return self.cells_rpcapi.service_get_by_compute_host(context,
host_name)
except exception.CellRoutingInconsistency:
raise exception.ComputeHostNotFound(host=host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
:param host_name: the name of the host machine that the service is
running
:param binary: The name of the executable that the service runs as
:param params_to_update: eg. {'disabled': True}
"""
return self.cells_rpcapi.service_update(
context, host_name, binary, params_to_update)
def service_delete(self, context, service_id):
"""Deletes the specified service."""
self.cells_rpcapi.service_delete(context, service_id)
def instance_get_all_by_host(self, context, host_name):
"""Get all instances by host. Host might have a cell prepended
to it, so we'll need to strip it out. We don't need to proxy
this call to cells, as we have instance information here in
the API cell.
"""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
instances = super(HostAPI, self).instance_get_all_by_host(context,
host_name)
if cell_name:
instances = [i for i in instances
if i['cell_name'] == cell_name]
return instances
def task_log_get_all(self, context, task_name, beginning, ending,
host=None, state=None):
"""Return the task logs within a given range from cells,
optionally filtering by the host and/or state. For cells, the
host should be a path like 'path!to!cell@host'. If no @host
is given, only task logs from a particular cell will be returned.
"""
return self.cells_rpcapi.task_log_get_all(context,
task_name,
beginning,
ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Get a compute node from a particular cell by its integer ID or UUID.
compute_id should be in the format of 'path!to!cell@ID'.
"""
try:
return self.cells_rpcapi.compute_node_get(context, compute_id)
except exception.CellRoutingInconsistency:
raise exception.ComputeHostNotFound(host=compute_id)
    def compute_node_get_all(self, context, limit=None, marker=None):
        """Return all compute nodes via the cells RPC API.

        ``limit`` and ``marker`` are accepted only for signature parity
        with compute.api and are ignored (see note below).
        """
        # NOTE(lyj): No pagination for cells, just make sure the arguments
        # for the method are the same with the compute.api for now.
        return self.cells_rpcapi.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.cells_rpcapi.compute_node_get_all(context,
hypervisor_match=hypervisor_match)
    def compute_node_statistics(self, context):
        """Return compute node statistics, delegated to the cells RPC API."""
        return self.cells_rpcapi.compute_node_stats(context)
class InstanceActionAPI(compute_api.InstanceActionAPI):
    """InstanceActionAPI() class for cells.

    Proxies instance-action reads over the cells RPC API instead of
    hitting the local database.
    """

    def __init__(self):
        super(InstanceActionAPI, self).__init__()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()

    def actions_get(self, context, instance, limit=None, marker=None,
                    filters=None):
        # Paging and filtering aren't supported in cells v1; the extra
        # keyword arguments exist only for interface compatibility with
        # the base class and are ignored.
        return self.cells_rpcapi.actions_get(context, instance)

    def action_get_by_request_id(self, context, instance, request_id):
        return self.cells_rpcapi.action_get_by_request_id(
            context, instance, request_id)

    def action_events_get(self, context, instance, action_id):
        return self.cells_rpcapi.action_events_get(
            context, instance, action_id)
-2
View File
@@ -56,7 +56,6 @@ import six
from six.moves import range
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova import compute
from nova.compute import build_results
from nova.compute import claims
@@ -507,7 +506,6 @@ class ComputeManager(manager.Manager):
self.compute_task_api = conductor.ComputeTaskAPI()
self.is_neutron_security_groups = (
openstack_driver.is_neutron_security_groups())
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.query_client = query.SchedulerQueryClient()
self.instance_events = InstanceEvents()
self._sync_power_pool = eventlet.GreenPool(
-1
View File
@@ -462,7 +462,6 @@ class ComputeAPI(object):
'service': service_version})
return version_cap
# Cells overrides this
def get_client(self, target, version_cap, serializer):
if CONF.rpc_response_timeout > rpc.HEARTBEAT_THRESHOLD:
# NOTE(danms): If the operator has overridden RPC timeout
-20
View File
@@ -1281,30 +1281,10 @@ class CellExists(NovaException):
msg_fmt = _("Cell with name %(name)s already exists.")
class CellRoutingInconsistency(NovaException):
msg_fmt = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
msg_fmt = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
msg_fmt = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
msg_fmt = _("No cells available matching scheduling criteria.")
class CellsUpdateUnsupported(NovaException):
msg_fmt = _("Cannot update cells configuration file.")
class InstanceUnknownCell(NotFound):
msg_fmt = _("Cell is not known for instance %(instance_uuid)s")
+2
View File
@@ -172,6 +172,8 @@ class NotificationPublisher(NotificationObject):
# 2.2: New enum for source fields added
VERSION = '2.2'
# TODO(stephenfin): Remove 'nova-cells' from 'NotificationSourceField' enum
# when bumping this object to version 3.0
fields = {
'host': fields.StringField(nullable=False),
'source': fields.NotificationSourceField(nullable=False),
+2
View File
@@ -766,6 +766,8 @@ class NotificationSource(BaseNovaEnum):
SCHEDULER = 'nova-scheduler'
NETWORK = 'nova-network'
CONSOLEAUTH = 'nova-consoleauth'
# TODO(stephenfin): Remove when 'NotificationPublisher' object version is
# bumped to 3.0
CELLS = 'nova-cells'
CONSOLE = 'nova-console'
METADATA = 'nova-metadata'
-2
View File
@@ -22,7 +22,6 @@ from nova.policies import attach_interfaces
from nova.policies import availability_zone
from nova.policies import baremetal_nodes
from nova.policies import base
from nova.policies import cells_scheduler
from nova.policies import console_auth_tokens
from nova.policies import console_output
from nova.policies import consoles
@@ -85,7 +84,6 @@ def list_rules():
attach_interfaces.list_rules(),
availability_zone.list_rules(),
baremetal_nodes.list_rules(),
cells_scheduler.list_rules(),
console_auth_tokens.list_rules(),
console_output.list_rules(),
consoles.list_rules(),
-41
View File
@@ -1,41 +0,0 @@
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
POLICY_ROOT = 'cells_scheduler_filter:%s'
cells_scheduler_policies = [
policy.RuleDefault(
POLICY_ROOT % 'DifferentCellFilter',
'is_admin:True',
"""Different cell filter to route a build away from a particular cell
This policy is read by nova-scheduler process.
"""),
policy.RuleDefault(
POLICY_ROOT % 'TargetCellFilter',
'is_admin:True',
"""Target cell filter to route a build to a particular cell
This policy is read by nova-scheduler process.
""")
]
def list_rules():
return cells_scheduler_policies
@@ -16,7 +16,6 @@
import copy
import datetime
import iso8601
import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
@@ -26,7 +25,6 @@ import webob.exc
from nova.api.openstack.compute import services as services_v21
from nova.api.openstack import wsgi as os_wsgi
from nova import availability_zones
from nova.cells import utils as cells_utils
from nova import compute
from nova import context
from nova import exception
@@ -1317,78 +1315,6 @@ class ServicesTestV253(test.TestCase):
six.text_type(ex))
class ServicesCellsTestV21(test.TestCase):
def setUp(self):
super(ServicesCellsTestV21, self).setUp()
host_api = compute.cells_api.HostAPI()
self._set_up_controller()
self.controller.host_api = host_api
self.useFixture(utils_fixture.TimeFixture(fake_utcnow()))
services_list = []
for service in fake_services_list:
service = service.copy()
del service['version']
service_obj = objects.Service(**service)
service_proxy = cells_utils.ServiceProxy(service_obj, 'cell1')
services_list.append(service_proxy)
host_api.cells_rpcapi.service_get_all = (
mock.Mock(side_effect=fake_service_get_all(services_list)))
def _set_up_controller(self):
self.controller = services_v21.ServiceController()
def _process_out(self, res_dict):
for res in res_dict['services']:
res.pop('disabled_reason')
def test_services_detail(self):
req = fakes.HTTPRequest.blank('/fake/services',
use_admin_context=True)
res_dict = self.controller.index(req)
utc = iso8601.UTC
response = {'services': [
{'id': 'cell1@1',
'binary': 'nova-scheduler',
'host': 'cell1@host1',
'zone': 'internal',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2,
tzinfo=utc)},
{'id': 'cell1@2',
'binary': 'nova-compute',
'host': 'cell1@host1',
'zone': 'nova',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5,
tzinfo=utc)},
{'id': 'cell1@3',
'binary': 'nova-scheduler',
'host': 'cell1@host2',
'zone': 'internal',
'status': 'enabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34,
tzinfo=utc)},
{'id': 'cell1@4',
'binary': 'nova-compute',
'host': 'cell1@host2',
'zone': 'nova',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38,
tzinfo=utc)}]}
self._process_out(res_dict)
self.assertEqual(response, res_dict)
class ServicesPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
View File
-213
View File
@@ -1,213 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For Cells tests.
"""
from nova.cells import driver
from nova.cells import manager as cells_manager
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
import nova.conf
import nova.db.api
from nova.db import base
from nova import exception
from nova import objects
CONF = nova.conf.CONF
# Fake Cell Hierarchy
FAKE_TOP_LEVEL_CELL_NAME = 'api-cell'
FAKE_CELL_LAYOUT = [{'child-cell1': []},
{'child-cell2': [{'grandchild-cell1': []}]},
{'child-cell3': [{'grandchild-cell2': []},
{'grandchild-cell3': []}]},
{'child-cell4': []}]
# build_cell_stub_infos() below will take the above layout and create
# a fake view of the DB from the perspective of each of the cells.
# For each cell, a CellStubInfo will be created with this info.
CELL_NAME_TO_STUB_INFO = {}
class FakeDBApi(object):
"""Cells uses a different DB in each cell. This means in order to
stub out things differently per cell, I need to create a fake DBApi
object that is instantiated by each fake cell.
"""
def __init__(self, cell_db_entries):
self.cell_db_entries = cell_db_entries
def __getattr__(self, key):
return getattr(nova.db.api, key)
def cell_get_all(self, ctxt):
return self.cell_db_entries
def instance_get_all_by_filters(self, ctxt, *args, **kwargs):
return []
def instance_get_by_uuid(self, ctxt, instance_uuid):
raise exception.InstanceNotFound(instance_id=instance_uuid)
class FakeCellsDriver(driver.BaseCellsDriver):
pass
class FakeCellState(cells_state.CellState):
def send_message(self, message):
message_runner = get_message_runner(self.name)
orig_ctxt = message.ctxt
json_message = message.to_json()
message = message_runner.message_from_json(json_message)
# Restore this so we can use mox and verify same context
message.ctxt = orig_ctxt
message.process()
class FakeCellStateManager(cells_state.CellStateManagerDB):
def __init__(self, *args, **kwargs):
super(FakeCellStateManager, self).__init__(*args,
cell_state_cls=FakeCellState, **kwargs)
class FakeCellsManager(cells_manager.CellsManager):
def __init__(self, *args, **kwargs):
super(FakeCellsManager, self).__init__(*args,
cell_state_manager=FakeCellStateManager,
**kwargs)
class CellStubInfo(object):
def __init__(self, test_case, cell_name, db_entries):
self.test_case = test_case
self.cell_name = cell_name
self.db_entries = db_entries
def fake_base_init(_self, *args, **kwargs):
_self.db = FakeDBApi(db_entries)
@staticmethod
def _fake_compute_node_get_all(context):
return []
@staticmethod
def _fake_service_get_by_binary(context, binary):
return []
test_case.stubs.Set(base.Base, '__init__', fake_base_init)
test_case.stubs.Set(objects.ComputeNodeList, 'get_all',
_fake_compute_node_get_all)
test_case.stubs.Set(objects.ServiceList, 'get_by_binary',
_fake_service_get_by_binary)
self.cells_manager = FakeCellsManager()
# Fix the cell name, as it normally uses CONF.cells.name
msg_runner = self.cells_manager.msg_runner
msg_runner.our_name = self.cell_name
self.cells_manager.state_manager.my_cell_state.name = self.cell_name
def _build_cell_transport_url(cur_db_id):
username = 'username%s' % cur_db_id
password = 'password%s' % cur_db_id
hostname = 'rpc_host%s' % cur_db_id
port = 3090 + cur_db_id
virtual_host = 'rpc_vhost%s' % cur_db_id
return 'rabbit://%s:%s@%s:%s/%s' % (username, password, hostname, port,
virtual_host)
def _build_cell_stub_info(test_case, our_name, parent_path, children):
cell_db_entries = []
cur_db_id = 1
sep_char = cells_utils.PATH_CELL_SEP
if parent_path:
cell_db_entries.append(
dict(id=cur_db_id,
name=parent_path.split(sep_char)[-1],
is_parent=True,
transport_url=_build_cell_transport_url(cur_db_id)))
cur_db_id += 1
our_path = parent_path + sep_char + our_name
else:
our_path = our_name
for child in children:
for child_name, grandchildren in child.items():
_build_cell_stub_info(test_case, child_name, our_path,
grandchildren)
cell_entry = dict(id=cur_db_id,
name=child_name,
transport_url=_build_cell_transport_url(
cur_db_id),
is_parent=False)
cell_db_entries.append(cell_entry)
cur_db_id += 1
stub_info = CellStubInfo(test_case, our_name, cell_db_entries)
CELL_NAME_TO_STUB_INFO[our_name] = stub_info
def _build_cell_stub_infos(test_case):
_build_cell_stub_info(test_case, FAKE_TOP_LEVEL_CELL_NAME, '',
FAKE_CELL_LAYOUT)
def init(test_case):
global CELL_NAME_TO_STUB_INFO
CELL_NAME_TO_STUB_INFO = {}
_build_cell_stub_infos(test_case)
def _get_cell_stub_info(cell_name):
return CELL_NAME_TO_STUB_INFO[cell_name]
def get_state_manager(cell_name):
return _get_cell_stub_info(cell_name).cells_manager.state_manager
def get_cell_state(cur_cell_name, tgt_cell_name):
state_manager = get_state_manager(cur_cell_name)
cell = state_manager.child_cells.get(tgt_cell_name)
if cell is None:
cell = state_manager.parent_cells.get(tgt_cell_name)
return cell
def get_cells_manager(cell_name):
return _get_cell_stub_info(cell_name).cells_manager
def get_message_runner(cell_name):
return _get_cell_stub_info(cell_name).cells_manager.msg_runner
def stub_tgt_method(test_case, cell_name, method_name, method):
msg_runner = get_message_runner(cell_name)
tgt_msg_methods = msg_runner.methods_by_type['targeted']
setattr(tgt_msg_methods, method_name, method)
def stub_bcast_method(test_case, cell_name, method_name, method):
msg_runner = get_message_runner(cell_name)
tgt_msg_methods = msg_runner.methods_by_type['broadcast']
setattr(tgt_msg_methods, method_name, method)
def stub_bcast_methods(test_case, method_name, method):
for cell_name in CELL_NAME_TO_STUB_INFO.keys():
stub_bcast_method(test_case, cell_name, method_name, method)
-230
View File
@@ -1,230 +0,0 @@
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for cells scheduler filters.
"""
from nova.cells import filters
from nova.cells import state
from nova import context
from nova.db.sqlalchemy import models
from nova import test
from nova.tests.unit.cells import fakes
class FiltersTestCase(test.NoDBTestCase):
"""Makes sure the proper filters are in the directory."""
def test_all_filters(self):
filter_classes = filters.all_filters()
class_names = [cls.__name__ for cls in filter_classes]
self.assertIn("TargetCellFilter", class_names)
self.assertIn("DifferentCellFilter", class_names)
class _FilterTestClass(test.NoDBTestCase):
"""Base class for testing individual filter plugins."""
filter_cls_name = None
def setUp(self):
super(_FilterTestClass, self).setUp()
fakes.init(self)
self.msg_runner = fakes.get_message_runner('api-cell')
self.scheduler = self.msg_runner.scheduler
self.my_cell_state = self.msg_runner.state_manager.get_my_state()
self.filter_handler = filters.CellFilterHandler()
filter_classes = self.filter_handler.get_matching_classes(
[self.filter_cls_name])
self.filters = [cls() for cls in filter_classes]
self.context = context.RequestContext('fake', 'fake',
is_admin=True)
def _filter_cells(self, cells, filter_properties):
return self.filter_handler.get_filtered_objects(self.filters,
cells,
filter_properties)
class ImagePropertiesFilter(_FilterTestClass):
filter_cls_name = \
'nova.cells.filters.image_properties.ImagePropertiesFilter'
def setUp(self):
super(ImagePropertiesFilter, self).setUp()
self.cell1 = models.Cell()
self.cell2 = models.Cell()
self.cell3 = models.Cell()
self.cells = [self.cell1, self.cell2, self.cell3]
for cell in self.cells:
cell.capabilities = {}
self.filter_props = {'context': self.context, 'request_spec': {}}
def test_missing_image_properties(self):
self.assertEqual(self.cells,
self._filter_cells(self.cells, self.filter_props))
def test_missing_hypervisor_version_requires(self):
self.filter_props['request_spec'] = {'image': {'properties': {}}}
for cell in self.cells:
cell.capabilities = {"prominent_hypervisor_version": set([u"6.2"])}
self.assertEqual(self.cells,
self._filter_cells(self.cells, self.filter_props))
def test_missing_hypervisor_version_in_cells(self):
image = {'properties': {'hypervisor_version_requires': '>6.2.1'}}
self.filter_props['request_spec'] = {'image': image}
self.cell1.capabilities = {"prominent_hypervisor_version": set([])}
self.assertEqual(self.cells,
self._filter_cells(self.cells, self.filter_props))
def test_cells_matching_hypervisor_version(self):
image = {'properties': {'hypervisor_version_requires': '>6.0, <=6.3'}}
self.filter_props['request_spec'] = {'image': image}
self.cell1.capabilities = {"prominent_hypervisor_version":
set([u"6.2"])}
self.cell2.capabilities = {"prominent_hypervisor_version":
set([u"6.3"])}
self.cell3.capabilities = {"prominent_hypervisor_version":
set([u"6.0"])}
self.assertEqual([self.cell1, self.cell2],
self._filter_cells(self.cells, self.filter_props))
# assert again to verify filter doesn't mutate state
# LP bug #1325705
self.assertEqual([self.cell1, self.cell2],
self._filter_cells(self.cells, self.filter_props))
class TestTargetCellFilter(_FilterTestClass):
filter_cls_name = 'nova.cells.filters.target_cell.TargetCellFilter'
def test_missing_scheduler_hints(self):
cells = [1, 2, 3]
# No filtering
filter_props = {'context': self.context}
self.assertEqual(cells, self._filter_cells(cells, filter_props))
def test_no_target_cell_hint(self):
cells = [1, 2, 3]
filter_props = {'scheduler_hints': {},
'context': self.context}
# No filtering
self.assertEqual(cells, self._filter_cells(cells, filter_props))
def test_target_cell_specified_me(self):
cells = [1, 2, 3]
target_cell = 'fake!cell!path'
current_cell = 'fake!cell!path'
filter_props = {'scheduler_hints': {'target_cell': target_cell},
'routing_path': current_cell,
'scheduler': self.scheduler,
'context': self.context}
# Only myself in the list.
self.assertEqual([self.my_cell_state],
self._filter_cells(cells, filter_props))
def test_target_cell_specified_me_but_not_admin(self):
ctxt = context.RequestContext('fake', 'fake')
cells = [1, 2, 3]
target_cell = 'fake!cell!path'
current_cell = 'fake!cell!path'
filter_props = {'scheduler_hints': {'target_cell': target_cell},
'routing_path': current_cell,
'scheduler': self.scheduler,
'context': ctxt}
# No filtering, because not an admin.
self.assertEqual(cells, self._filter_cells(cells, filter_props))
def test_target_cell_specified_not_me(self):
info = {}
def _fake_build_instances(self, ctxt, cell, sched_kwargs):
info['ctxt'] = ctxt
info['cell'] = cell
info['sched_kwargs'] = sched_kwargs
self.stub_out('nova.cells.messaging.MessageRunner.build_instances',
_fake_build_instances)
cells = [1, 2, 3]
target_cell = 'fake!cell!path'
current_cell = 'not!the!same'
filter_props = {'scheduler_hints': {'target_cell': target_cell},
'routing_path': current_cell,
'scheduler': self.scheduler,
'context': self.context,
'host_sched_kwargs': 'meow'}
# None is returned to bypass further scheduling.
self.assertIsNone(self._filter_cells(cells, filter_props))
# The filter should have re-scheduled to the child cell itself.
expected_info = {'ctxt': self.context,
'cell': 'fake!cell!path',
'sched_kwargs': 'meow'}
self.assertEqual(expected_info, info)
class TestDifferentCellFilter(_FilterTestClass):
filter_cls_name = 'nova.cells.filters.different_cell.DifferentCellFilter'
def setUp(self):
super(TestDifferentCellFilter, self).setUp()
# We only load one filter so we know the first one is the one we want
self.policy.set_rules({'cells_scheduler_filter:DifferentCellFilter':
''})
self.cells = [state.CellState('1'),
state.CellState('2'),
state.CellState('3')]
def test_missing_scheduler_hints(self):
filter_props = {'context': self.context}
# No filtering
self.assertEqual(self.cells,
self._filter_cells(self.cells, filter_props))
def test_no_different_cell_hint(self):
filter_props = {'scheduler_hints': {},
'context': self.context}
# No filtering
self.assertEqual(self.cells,
self._filter_cells(self.cells, filter_props))
def test_different_cell(self):
filter_props = {'scheduler_hints': {'different_cell': 'fake!2'},
'routing_path': 'fake',
'context': self.context}
filtered_cells = self._filter_cells(self.cells, filter_props)
self.assertEqual(2, len(filtered_cells))
self.assertNotIn(self.cells[1], filtered_cells)
def test_different_multiple_cells(self):
filter_props = {'scheduler_hints':
{'different_cell': ['fake!1', 'fake!2']},
'routing_path': 'fake',
'context': self.context}
filtered_cells = self._filter_cells(self.cells, filter_props)
self.assertEqual(1, len(filtered_cells))
self.assertNotIn(self.cells[0], filtered_cells)
self.assertNotIn(self.cells[1], filtered_cells)
def test_different_cell_specified_me_not_authorized(self):
self.policy.set_rules({'cells_scheduler_filter:DifferentCellFilter':
'!'})
filter_props = {'scheduler_hints': {'different_cell': 'fake!2'},
'routing_path': 'fake',
'context': self.context}
# No filtering, because not an admin.
self.assertEqual(self.cells,
self._filter_cells(self.cells, filter_props))
-813
View File
@@ -1,813 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellsManager
"""
import copy
import datetime
import mock
from oslo_utils import timeutils
from six.moves import range
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova.compute import rpcapi as compute_rpcapi
import nova.conf
from nova import context
from nova import objects
from nova import test
from nova.tests.unit.cells import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_server_actions
from nova.tests.unit.objects import test_flavor
CONF = nova.conf.CONF
FAKE_COMPUTE_NODES = [dict(id=1, host='host1'), dict(id=2, host='host2')]
FAKE_SERVICES = [dict(id=1, host='host1'),
dict(id=2, host='host2'),
dict(id=3, host='host3')]
FAKE_TASK_LOGS = [dict(id=1, host='host1'),
dict(id=2, host='host2')]
class CellsManagerClassTestCase(test.NoDBTestCase):
"""Test case for CellsManager class."""
def setUp(self):
super(CellsManagerClassTestCase, self).setUp()
fakes.init(self)
# pick a child cell to use for tests.
self.our_cell = 'grandchild-cell1'
self.cells_manager = fakes.get_cells_manager(self.our_cell)
self.msg_runner = self.cells_manager.msg_runner
self.state_manager = fakes.get_state_manager(self.our_cell)
self.driver = self.cells_manager.driver
self.ctxt = 'fake_context'
def _get_fake_response(self, raw_response=None, exc=False):
if exc:
return messaging.Response(self.ctxt, 'fake',
test.TestingException(),
True)
if raw_response is None:
raw_response = 'fake-response'
return messaging.Response(self.ctxt, 'fake', raw_response, False)
def test_get_cell_info_for_neighbors(self):
self.mox.StubOutWithMock(self.cells_manager.state_manager,
'get_cell_info_for_neighbors')
self.cells_manager.state_manager.get_cell_info_for_neighbors()
self.mox.ReplayAll()
self.cells_manager.get_cell_info_for_neighbors(self.ctxt)
def test_post_start_hook_child_cell(self):
self.mox.StubOutWithMock(self.driver, 'start_servers')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents')
self.driver.start_servers(self.msg_runner)
context.get_admin_context().AndReturn(self.ctxt)
self.cells_manager._update_our_parents(self.ctxt)
self.mox.ReplayAll()
self.cells_manager.post_start_hook()
def test_post_start_hook_middle_cell(self):
cells_manager = fakes.get_cells_manager('child-cell2')
msg_runner = cells_manager.msg_runner
driver = cells_manager.driver
self.mox.StubOutWithMock(driver, 'start_servers')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(msg_runner,
'ask_children_for_capabilities')
self.mox.StubOutWithMock(msg_runner,
'ask_children_for_capacities')
driver.start_servers(msg_runner)
context.get_admin_context().AndReturn(self.ctxt)
msg_runner.ask_children_for_capabilities(self.ctxt)
msg_runner.ask_children_for_capacities(self.ctxt)
self.mox.ReplayAll()
cells_manager.post_start_hook()
def test_update_our_parents(self):
self.mox.StubOutWithMock(self.msg_runner,
'tell_parents_our_capabilities')
self.mox.StubOutWithMock(self.msg_runner,
'tell_parents_our_capacities')
self.msg_runner.tell_parents_our_capabilities(self.ctxt)
self.msg_runner.tell_parents_our_capacities(self.ctxt)
self.mox.ReplayAll()
self.cells_manager._update_our_parents(self.ctxt)
def test_build_instances(self):
build_inst_kwargs = {'instances': [objects.Instance(),
objects.Instance()]}
self.mox.StubOutWithMock(self.msg_runner, 'build_instances')
our_cell = self.msg_runner.state_manager.get_my_state()
self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs)
self.mox.ReplayAll()
self.cells_manager.build_instances(self.ctxt,
build_inst_kwargs=build_inst_kwargs)
def test_build_instances_old_flavor(self):
flavor_dict = test_flavor.fake_flavor
args = {'filter_properties': {'instance_type': flavor_dict},
'instances': [objects.Instance()]}
with mock.patch.object(self.msg_runner, 'build_instances') as mock_bi:
self.cells_manager.build_instances(self.ctxt,
build_inst_kwargs=args)
filter_properties = mock_bi.call_args[0][2]['filter_properties']
self.assertIsInstance(filter_properties['instance_type'],
objects.Flavor)
def test_build_instances_old_instances(self):
args = {'instances': [fake_instance.fake_db_instance()]}
with mock.patch.object(self.msg_runner, 'build_instances') as mock_bi:
self.cells_manager.build_instances(self.ctxt,
build_inst_kwargs=args)
self.assertIsInstance(mock_bi.call_args[0][2]['instances'][0],
objects.Instance)
def test_run_compute_api_method(self):
# Args should just be silently passed through
cell_name = 'fake-cell-name'
method_info = 'fake-method-info'
self.mox.StubOutWithMock(self.msg_runner,
'run_compute_api_method')
fake_response = self._get_fake_response()
self.msg_runner.run_compute_api_method(self.ctxt,
cell_name,
method_info,
True).AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.run_compute_api_method(
self.ctxt, cell_name=cell_name, method_info=method_info,
call=True)
self.assertEqual('fake-response', response)
def test_instance_delete_everywhere(self):
self.mox.StubOutWithMock(self.msg_runner,
'instance_delete_everywhere')
self.msg_runner.instance_delete_everywhere(self.ctxt,
'fake-instance',
'fake-type')
self.mox.ReplayAll()
self.cells_manager.instance_delete_everywhere(
self.ctxt, instance='fake-instance',
delete_type='fake-type')
def test_heal_instances(self):
self.flags(instance_updated_at_threshold=1000,
instance_update_num_instances=2,
group='cells')
fake_context = context.RequestContext('fake', 'fake')
stalled_time = timeutils.utcnow()
updated_since = stalled_time - datetime.timedelta(seconds=1000)
def utcnow():
return stalled_time
call_info = {'get_instances': 0, 'sync_instances': []}
instances = ['instance1', 'instance2', 'instance3']
def get_instances_to_sync(context, **kwargs):
self.assertEqual(fake_context, context)
call_info['shuffle'] = kwargs.get('shuffle')
call_info['project_id'] = kwargs.get('project_id')
call_info['updated_since'] = kwargs.get('updated_since')
call_info['get_instances'] += 1
return iter(instances)
@staticmethod
def instance_get_by_uuid(context, uuid):
return instances[int(uuid[-1]) - 1]
def sync_instance(context, instance):
self.assertEqual(fake_context, context)
call_info['sync_instances'].append(instance)
self.stubs.Set(cells_utils, 'get_instances_to_sync',
get_instances_to_sync)
self.stubs.Set(objects.Instance, 'get_by_uuid',
instance_get_by_uuid)
self.stubs.Set(self.cells_manager, '_sync_instance',
sync_instance)
self.stubs.Set(timeutils, 'utcnow', utcnow)
self.cells_manager._heal_instances(fake_context)
self.assertTrue(call_info['shuffle'])
self.assertIsNone(call_info['project_id'])
self.assertEqual(updated_since, call_info['updated_since'])
self.assertEqual(1, call_info['get_instances'])
# Only first 2
self.assertEqual(instances[:2], call_info['sync_instances'])
call_info['sync_instances'] = []
self.cells_manager._heal_instances(fake_context)
self.assertTrue(call_info['shuffle'])
self.assertIsNone(call_info['project_id'])
self.assertEqual(updated_since, call_info['updated_since'])
self.assertEqual(2, call_info['get_instances'])
# Now the last 1 and the first 1
self.assertEqual([instances[-1], instances[0]],
call_info['sync_instances'])
def test_sync_instances(self):
self.mox.StubOutWithMock(self.msg_runner,
'sync_instances')
self.msg_runner.sync_instances(self.ctxt, 'fake-project',
'fake-time', 'fake-deleted')
self.mox.ReplayAll()
self.cells_manager.sync_instances(self.ctxt,
project_id='fake-project',
updated_since='fake-time',
deleted='fake-deleted')
def test_service_get_all(self):
responses = []
expected_response = []
# 3 cells... so 3 responses. Each response is a list of services.
# Manager should turn these into a single list of responses.
for i in range(3):
cell_name = 'path!to!cell%i' % i
services = []
for service in FAKE_SERVICES:
fake_service = objects.Service(**service)
services.append(fake_service)
expected_service = cells_utils.ServiceProxy(fake_service,
cell_name)
expected_response.append(
(cell_name, expected_service, fake_service))
response = messaging.Response(self.ctxt, cell_name, services,
False)
responses.append(response)
self.mox.StubOutWithMock(self.msg_runner,
'service_get_all')
self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service')
self.msg_runner.service_get_all(self.ctxt,
'fake-filters').AndReturn(responses)
# Calls are done by cells, so we need to sort the list by the cell name
expected_response.sort(key=lambda k: k[0])
for cell_name, service_proxy, service in expected_response:
cells_utils.add_cell_to_service(
service, cell_name).AndReturn(service_proxy)
self.mox.ReplayAll()
response = self.cells_manager.service_get_all(self.ctxt,
filters='fake-filters')
self.assertEqual([proxy for cell, proxy, service in expected_response],
response)
def test_service_get_by_compute_host(self):
fake_cell = 'fake-cell'
fake_service = objects.Service(**FAKE_SERVICES[0])
fake_response = messaging.Response(self.ctxt, fake_cell,
fake_service,
False)
expected_response = cells_utils.ServiceProxy(fake_service, fake_cell)
cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
self.mox.StubOutWithMock(self.msg_runner,
'service_get_by_compute_host')
self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service')
self.msg_runner.service_get_by_compute_host(self.ctxt,
fake_cell, 'fake-host').AndReturn(fake_response)
cells_utils.add_cell_to_service(fake_service, fake_cell).AndReturn(
expected_response)
self.mox.ReplayAll()
response = self.cells_manager.service_get_by_compute_host(self.ctxt,
host_name=cell_and_host)
self.assertEqual(expected_response, response)
    def test_get_host_uptime(self):
        """The uptime string from the target cell is returned unchanged."""
        fake_cell = 'parent!fake-cell'
        fake_host = 'fake-host'
        fake_cell_and_host = cells_utils.cell_with_item(fake_cell, fake_host)
        host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
                       " 0.20, 0.12, 0.14")
        fake_response = messaging.Response(self.ctxt, fake_cell, host_uptime,
                                           False)
        self.mox.StubOutWithMock(self.msg_runner,
                                 'get_host_uptime')
        self.msg_runner.get_host_uptime(self.ctxt, fake_cell, fake_host).\
            AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.get_host_uptime(self.ctxt,
                                                      fake_cell_and_host)
        self.assertEqual(host_uptime, response)
    def test_service_update(self):
        """service_update() targets the cell and wraps the updated service."""
        fake_cell = 'fake-cell'
        fake_service = objects.Service(**FAKE_SERVICES[0])
        fake_response = messaging.Response(
            self.ctxt, fake_cell, fake_service, False)
        expected_response = cells_utils.ServiceProxy(fake_service, fake_cell)
        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
        params_to_update = {'disabled': True}
        self.mox.StubOutWithMock(self.msg_runner, 'service_update')
        self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service')
        self.msg_runner.service_update(self.ctxt,
                fake_cell, 'fake-host', 'nova-api',
                params_to_update).AndReturn(fake_response)
        cells_utils.add_cell_to_service(fake_service, fake_cell).AndReturn(
            expected_response)
        self.mox.ReplayAll()
        response = self.cells_manager.service_update(
            self.ctxt, host_name=cell_and_host, binary='nova-api',
            params_to_update=params_to_update)
        self.assertEqual(expected_response, response)
def test_service_delete(self):
fake_cell = 'fake-cell'
service_id = '1'
cell_service_id = cells_utils.cell_with_item(fake_cell, service_id)
with mock.patch.object(self.msg_runner,
'service_delete') as service_delete:
self.cells_manager.service_delete(self.ctxt, cell_service_id)
service_delete.assert_called_once_with(
self.ctxt, fake_cell, service_id)
    def test_proxy_rpc_to_manager(self):
        """RPC messages are proxied to the compute manager in the cell."""
        self.mox.StubOutWithMock(self.msg_runner,
                                 'proxy_rpc_to_manager')
        fake_response = self._get_fake_response()
        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
        topic = "%s.%s" % (compute_rpcapi.RPC_TOPIC, cell_and_host)
        self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell',
                'fake-host', topic, 'fake-rpc-msg',
                True, -1).AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.proxy_rpc_to_manager(self.ctxt,
                topic=topic, rpc_message='fake-rpc-msg', call=True,
                timeout=-1)
        self.assertEqual('fake-response', response)
def _build_task_log_responses(self, num):
responses = []
expected_response = []
# 3 cells... so 3 responses. Each response is a list of task log
# entries. Manager should turn these into a single list of
# task log entries.
for i in range(num):
cell_name = 'path!to!cell%i' % i
task_logs = []
for task_log in FAKE_TASK_LOGS:
task_logs.append(copy.deepcopy(task_log))
expected_task_log = copy.deepcopy(task_log)
cells_utils.add_cell_to_task_log(expected_task_log,
cell_name)
expected_response.append(expected_task_log)
response = messaging.Response(self.ctxt, cell_name, task_logs,
False)
responses.append(response)
return expected_response, responses
    def test_task_log_get_all(self):
        """Task log entries from all cells are merged into a single list."""
        expected_response, responses = self._build_task_log_responses(3)
        self.mox.StubOutWithMock(self.msg_runner,
                                 'task_log_get_all')
        self.msg_runner.task_log_get_all(self.ctxt, None,
                'fake-name', 'fake-begin',
                'fake-end', host=None, state=None).AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.task_log_get_all(self.ctxt,
                task_name='fake-name',
                period_beginning='fake-begin', period_ending='fake-end')
        self.assertEqual(expected_response, response)
    def test_task_log_get_all_with_filters(self):
        """A cell!host filter is split into cell and host arguments."""
        expected_response, responses = self._build_task_log_responses(1)
        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
        self.mox.StubOutWithMock(self.msg_runner,
                                 'task_log_get_all')
        self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
                'fake-name', 'fake-begin', 'fake-end', host='fake-host',
                state='fake-state').AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.task_log_get_all(self.ctxt,
                task_name='fake-name',
                period_beginning='fake-begin', period_ending='fake-end',
                host=cell_and_host, state='fake-state')
        self.assertEqual(expected_response, response)
    def test_task_log_get_all_with_cell_but_no_host_filters(self):
        """A filter of only a cell name yields host=None for the query."""
        expected_response, responses = self._build_task_log_responses(1)
        # Host filter only has cell name.
        cell_and_host = 'fake-cell'
        self.mox.StubOutWithMock(self.msg_runner,
                                 'task_log_get_all')
        self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
                'fake-name', 'fake-begin', 'fake-end', host=None,
                state='fake-state').AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.task_log_get_all(self.ctxt,
                task_name='fake-name',
                period_beginning='fake-begin', period_ending='fake-end',
                host=cell_and_host, state='fake-state')
        self.assertEqual(expected_response, response)
    def test_compute_node_get_all(self):
        """Compute nodes from every cell are wrapped and merged."""
        responses = []
        expected_response = []
        # 3 cells... so 3 responses. Each response is a list of computes.
        # Manager should turn these into a single list of responses.
        for i in range(3):
            cell_name = 'path!to!cell%i' % i
            compute_nodes = []
            for compute_node in FAKE_COMPUTE_NODES:
                fake_compute = objects.ComputeNode(**compute_node)
                fake_compute._cached_service = None
                compute_nodes.append(fake_compute)
                expected_compute_node = cells_utils.ComputeNodeProxy(
                    fake_compute, cell_name)
                expected_response.append(
                    (cell_name, expected_compute_node, fake_compute))
            response = messaging.Response(self.ctxt, cell_name, compute_nodes,
                                          False)
            responses.append(response)
        self.mox.StubOutWithMock(self.msg_runner,
                                 'compute_node_get_all')
        self.mox.StubOutWithMock(cells_utils, 'add_cell_to_compute_node')
        self.msg_runner.compute_node_get_all(self.ctxt,
                hypervisor_match='fake-match').AndReturn(responses)
        # Calls are done by cells, so we need to sort the list by the cell name
        expected_response.sort(key=lambda k: k[0])
        for cell_name, compute_proxy, compute_node in expected_response:
            cells_utils.add_cell_to_compute_node(
                compute_node, cell_name).AndReturn(compute_proxy)
        self.mox.ReplayAll()
        response = self.cells_manager.compute_node_get_all(self.ctxt,
                hypervisor_match='fake-match')
        self.assertEqual([proxy for cell, proxy, compute in expected_response],
                         response)
    def test_compute_node_stats(self):
        """Per-cell stats dicts are summed key by key.

        NOTE(review): the third response reuses the cell name 'cell2' -
        presumably a typo for 'cell3', but the cell name is not part of
        the aggregation being asserted, so the result is unaffected.
        """
        raw_resp1 = {'key1': 1, 'key2': 2}
        raw_resp2 = {'key2': 1, 'key3': 2}
        raw_resp3 = {'key3': 1, 'key4': 2}
        responses = [messaging.Response(self.ctxt, 'cell1', raw_resp1, False),
                     messaging.Response(self.ctxt, 'cell2', raw_resp2, False),
                     messaging.Response(self.ctxt, 'cell2', raw_resp3, False)]
        expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2}
        self.mox.StubOutWithMock(self.msg_runner,
                                 'compute_node_stats')
        self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.compute_node_stats(self.ctxt)
        self.assertEqual(expected_resp, response)
    def test_compute_node_get(self):
        """A single compute node is fetched via its cell-qualified id."""
        fake_cell = 'fake-cell'
        fake_compute = objects.ComputeNode(**FAKE_COMPUTE_NODES[0])
        fake_compute._cached_service = None
        fake_response = messaging.Response(self.ctxt, fake_cell,
                                           fake_compute,
                                           False)
        expected_response = cells_utils.ComputeNodeProxy(fake_compute,
                                                         fake_cell)
        cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id')
        self.mox.StubOutWithMock(self.msg_runner,
                                 'compute_node_get')
        self.mox.StubOutWithMock(cells_utils, 'add_cell_to_compute_node')
        self.msg_runner.compute_node_get(self.ctxt,
                'fake-cell', 'fake-id').AndReturn(fake_response)
        cells_utils.add_cell_to_compute_node(
            fake_compute, fake_cell).AndReturn(expected_response)
        self.mox.ReplayAll()
        response = self.cells_manager.compute_node_get(self.ctxt,
                compute_id=cell_and_id)
        self.assertEqual(expected_response, response)
    def test_actions_get(self):
        """Instance actions from the target cell are returned as a list."""
        fake_uuid = fake_server_actions.FAKE_UUID
        fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
        fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
        fake_response = messaging.Response(self.ctxt, 'fake-cell', [fake_act],
                                           False)
        expected_response = [fake_act]
        self.mox.StubOutWithMock(self.msg_runner, 'actions_get')
        self.msg_runner.actions_get(self.ctxt, 'fake-cell',
                                    'fake-uuid').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.actions_get(self.ctxt, 'fake-cell',
                                                  'fake-uuid')
        self.assertEqual(expected_response, response)
    def test_action_get_by_request_id(self):
        """A single action is looked up by request id in the target cell."""
        fake_uuid = fake_server_actions.FAKE_UUID
        fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
        fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
        fake_response = messaging.Response(self.ctxt, 'fake-cell', fake_act,
                                           False)
        expected_response = fake_act
        self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id')
        self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell',
                'fake-uuid', 'req-fake').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.action_get_by_request_id(self.ctxt,
                                                               'fake-cell',
                                                               'fake-uuid',
                                                               'req-fake')
        self.assertEqual(expected_response, response)
    def test_action_events_get(self):
        """Events for an action are fetched from the target cell."""
        fake_action_id = fake_server_actions.FAKE_ACTION_ID1
        fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id]
        fake_response = messaging.Response(self.ctxt, 'fake-cell', fake_events,
                                           False)
        expected_response = fake_events
        self.mox.StubOutWithMock(self.msg_runner, 'action_events_get')
        self.msg_runner.action_events_get(self.ctxt, 'fake-cell',
                'fake-action').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell',
                                                        'fake-action')
        self.assertEqual(expected_response, response)
    def test_consoleauth_delete_tokens(self):
        """Console token deletion is relayed for the given instance uuid."""
        instance_uuid = 'fake-instance-uuid'
        self.mox.StubOutWithMock(self.msg_runner,
                                 'consoleauth_delete_tokens')
        self.msg_runner.consoleauth_delete_tokens(self.ctxt, instance_uuid)
        self.mox.ReplayAll()
        self.cells_manager.consoleauth_delete_tokens(self.ctxt,
                instance_uuid=instance_uuid)
    def test_get_capacities(self):
        """Capacity info comes straight from the state manager."""
        cell_name = 'cell_name'
        response = {"ram_free":
                   {"units_by_mb": {"64": 20, "128": 10}, "total_mb": 1491}}
        self.mox.StubOutWithMock(self.state_manager,
                                 'get_capacities')
        self.state_manager.get_capacities(cell_name).AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                self.cells_manager.get_capacities(self.ctxt, cell_name))
    def test_validate_console_port(self):
        """Console port validation is routed to the instance's cell."""
        instance_uuid = 'fake-instance-uuid'
        cell_name = 'fake-cell-name'
        instance = objects.Instance(cell_name=cell_name)
        console_port = 'fake-console-port'
        console_type = 'fake-console-type'
        self.mox.StubOutWithMock(self.msg_runner,
                                 'validate_console_port')
        self.mox.StubOutWithMock(objects.Instance, 'get_by_uuid')
        fake_response = self._get_fake_response()
        objects.Instance.get_by_uuid(self.ctxt,
                                     instance_uuid).AndReturn(instance)
        self.msg_runner.validate_console_port(self.ctxt, cell_name,
                instance_uuid, console_port,
                console_type).AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.validate_console_port(self.ctxt,
                instance_uuid=instance_uuid, console_port=console_port,
                console_type=console_type)
        self.assertEqual('fake-response', response)
    def test_get_migrations(self):
        """Migrations from all cells are merged into one list."""
        filters = {'status': 'confirmed'}
        cell1_migrations = objects.MigrationList(
            objects=[objects.Migration(id=123)])
        cell2_migrations = objects.MigrationList(
            objects=[objects.Migration(id=456)])
        fake_responses = [self._get_fake_response(cell1_migrations),
                          self._get_fake_response(cell2_migrations)]
        self.mox.StubOutWithMock(self.msg_runner,
                                 'get_migrations')
        self.msg_runner.get_migrations(self.ctxt, None, False, filters).\
            AndReturn(fake_responses)
        self.mox.ReplayAll()
        response = self.cells_manager.get_migrations(self.ctxt, filters)
        self.assertEqual(cell1_migrations.objects + cell2_migrations.objects,
                         response.objects)
    def test_get_migrations_for_a_given_cell(self):
        """A cell_name filter limits the migration query to that cell."""
        filters = {'status': 'confirmed', 'cell_name': 'ChildCell1'}
        target_cell = '%s%s%s' % (CONF.cells.name, '!', filters['cell_name'])
        migrations = objects.MigrationList(objects=[objects.Migration(id=123)])
        fake_responses = [self._get_fake_response(migrations)]
        self.mox.StubOutWithMock(self.msg_runner,
                                 'get_migrations')
        self.msg_runner.get_migrations(self.ctxt, target_cell, False,
                                       filters).AndReturn(fake_responses)
        self.mox.ReplayAll()
        response = self.cells_manager.get_migrations(self.ctxt, filters)
        self.assertEqual(migrations.objects, response.objects)
    def test_start_instance(self):
        """start_instance() is relayed to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'start_instance')
        self.msg_runner.start_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.start_instance(self.ctxt, instance='fake-instance')
    def test_stop_instance(self):
        """stop_instance() forwards do_cast and clean_shutdown untouched."""
        self.mox.StubOutWithMock(self.msg_runner, 'stop_instance')
        self.msg_runner.stop_instance(self.ctxt, 'fake-instance',
                                      do_cast='meow',
                                      clean_shutdown='purr')
        self.mox.ReplayAll()
        self.cells_manager.stop_instance(self.ctxt,
                                         instance='fake-instance',
                                         do_cast='meow',
                                         clean_shutdown='purr')
    def test_cell_create(self):
        """cell_create() delegates to the state manager."""
        values = 'values'
        response = 'created_cell'
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_create')
        self.state_manager.cell_create(self.ctxt, values).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_create(self.ctxt, values))
    def test_cell_update(self):
        """cell_update() delegates to the state manager."""
        cell_name = 'cell_name'
        values = 'values'
        response = 'updated_cell'
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_update')
        self.state_manager.cell_update(self.ctxt, cell_name, values).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_update(self.ctxt, cell_name,
                                                        values))
    def test_cell_delete(self):
        """cell_delete() delegates to the state manager."""
        cell_name = 'cell_name'
        response = 1
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_delete')
        self.state_manager.cell_delete(self.ctxt, cell_name).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_delete(self.ctxt, cell_name))
    def test_cell_get(self):
        """cell_get() delegates to the state manager."""
        cell_name = 'cell_name'
        response = 'cell_info'
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_get')
        self.state_manager.cell_get(self.ctxt, cell_name).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_get(self.ctxt, cell_name))
    def test_reboot_instance(self):
        """reboot_instance() passes the reboot type through."""
        self.mox.StubOutWithMock(self.msg_runner, 'reboot_instance')
        self.msg_runner.reboot_instance(self.ctxt, 'fake-instance',
                                        'HARD')
        self.mox.ReplayAll()
        self.cells_manager.reboot_instance(self.ctxt,
                                           instance='fake-instance',
                                           reboot_type='HARD')
    def test_suspend_instance(self):
        """suspend_instance() is relayed to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'suspend_instance')
        self.msg_runner.suspend_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.suspend_instance(self.ctxt,
                                            instance='fake-instance')
    def test_resume_instance(self):
        """resume_instance() is relayed to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'resume_instance')
        self.msg_runner.resume_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.resume_instance(self.ctxt,
                                           instance='fake-instance')
    def test_terminate_instance(self):
        """terminate_instance() forwards the delete type."""
        self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance')
        self.msg_runner.terminate_instance(self.ctxt, 'fake-instance',
                                           delete_type='delete')
        self.mox.ReplayAll()
        self.cells_manager.terminate_instance(self.ctxt,
                                              instance='fake-instance',
                                              delete_type='delete')
    def test_soft_delete_instance(self):
        """soft_delete_instance() is relayed to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance')
        self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.soft_delete_instance(self.ctxt,
                                                instance='fake-instance')
    def _test_resize_instance(self, clean_shutdown=True):
        """Exercise resize_instance() with the given clean_shutdown flag."""
        self.mox.StubOutWithMock(self.msg_runner, 'resize_instance')
        self.msg_runner.resize_instance(self.ctxt, 'fake-instance',
                                       'fake-flavor', 'fake-updates',
                                        clean_shutdown=clean_shutdown)
        self.mox.ReplayAll()
        self.cells_manager.resize_instance(
            self.ctxt, instance='fake-instance', flavor='fake-flavor',
            extra_instance_updates='fake-updates',
            clean_shutdown=clean_shutdown)
    def test_resize_instance(self):
        """A default resize requests a clean shutdown."""
        self._test_resize_instance()
    def test_resize_instance_forced_shutdown(self):
        """A forced shutdown is propagated as clean_shutdown=False."""
        self._test_resize_instance(clean_shutdown=False)
    def test_live_migrate_instance(self):
        """live_migrate_instance() forwards all migration arguments."""
        self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance')
        self.msg_runner.live_migrate_instance(self.ctxt, 'fake-instance',
                                              'fake-block', 'fake-commit',
                                              'fake-host')
        self.mox.ReplayAll()
        self.cells_manager.live_migrate_instance(
            self.ctxt, instance='fake-instance',
            block_migration='fake-block', disk_over_commit='fake-commit',
            host_name='fake-host')
    def test_revert_resize(self):
        """revert_resize() is relayed to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'revert_resize')
        self.msg_runner.revert_resize(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.revert_resize(self.ctxt, instance='fake-instance')
    def test_confirm_resize(self):
        """confirm_resize() is relayed to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'confirm_resize')
        self.msg_runner.confirm_resize(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.confirm_resize(self.ctxt, instance='fake-instance')
    def test_reset_network(self):
        """reset_network() is relayed to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'reset_network')
        self.msg_runner.reset_network(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.reset_network(self.ctxt, instance='fake-instance')
    def test_inject_network_info(self):
        """inject_network_info() is relayed to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'inject_network_info')
        self.msg_runner.inject_network_info(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.inject_network_info(self.ctxt,
                                               instance='fake-instance')
    def test_snapshot_instance(self):
        """snapshot_instance() forwards the image id."""
        self.mox.StubOutWithMock(self.msg_runner, 'snapshot_instance')
        self.msg_runner.snapshot_instance(self.ctxt, 'fake-instance',
                                          'fake-id')
        self.mox.ReplayAll()
        self.cells_manager.snapshot_instance(self.ctxt,
                                             instance='fake-instance',
                                             image_id='fake-id')
    def test_backup_instance(self):
        """backup_instance() forwards image id, backup type and rotation."""
        self.mox.StubOutWithMock(self.msg_runner, 'backup_instance')
        self.msg_runner.backup_instance(self.ctxt, 'fake-instance',
                                        'fake-id', 'backup-type',
                                        'rotation')
        self.mox.ReplayAll()
        self.cells_manager.backup_instance(self.ctxt,
                                           instance='fake-instance',
                                           image_id='fake-id',
                                           backup_type='backup-type',
                                           rotation='rotation')
def test_set_admin_password(self):
with mock.patch.object(self.msg_runner,
'set_admin_password') as set_admin_password:
self.cells_manager.set_admin_password(self.ctxt,
instance='fake-instance', new_pass='fake-password')
set_admin_password.assert_called_once_with(self.ctxt,
'fake-instance', 'fake-password')
File diff suppressed because it is too large Load Diff
@@ -1,225 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells RPC Communication Driver
"""
import mock
import oslo_messaging
from nova.cells import messaging
from nova.cells import rpc_driver
import nova.conf
from nova import context
from nova import test
from nova.tests.unit.cells import fakes
CONF = nova.conf.CONF
class CellsRPCDriverTestCase(test.NoDBTestCase):
    """Test case for Cells communication via RPC."""
    def setUp(self):
        # Fresh fake cell topology, context and driver for every test.
        super(CellsRPCDriverTestCase, self).setUp()
        fakes.init(self)
        self.ctxt = context.RequestContext('fake', 'fake')
        self.driver = rpc_driver.CellsRPCDriver()
    @mock.patch('nova.rpc.get_server')
    def test_start_servers(self, mock_get_server):
        """One RPC server is started per inter-cell message type."""
        self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
        fake_msg_runner = fakes.get_message_runner('api-cell')
        class FakeInterCellRPCDispatcher(object):
            def __init__(_self, msg_runner):
                self.assertEqual(fake_msg_runner, msg_runner)
        endpoints = [test.MatchType(FakeInterCellRPCDispatcher)]
        self.stub_out('nova.cells.rpc_driver.InterCellRPCDispatcher',
                      FakeInterCellRPCDispatcher)
        rpcserver = mock.Mock()
        mock_get_server.return_value = rpcserver
        # Expect a get_server() call for every message type, each with a
        # topic derived from the configured queue base.
        expected_mock_get_server_called_list = []
        for message_type in messaging.MessageRunner.get_message_types():
            topic = 'cells.intercell42.' + message_type
            target = oslo_messaging.Target(topic=topic, server=CONF.host)
            expected_mock_get_server_called_list.append(
                mock.call(target, endpoints=endpoints))
        self.driver.start_servers(fake_msg_runner)
        rpcserver.start.assert_called()
        self.assertEqual(expected_mock_get_server_called_list,
                         mock_get_server.call_args_list)
        self.assertEqual(len(messaging.MessageRunner.get_message_types()),
                         rpcserver.start.call_count)
        self.assertEqual(len(messaging.MessageRunner.get_message_types()),
                         mock_get_server.call_count)
    def test_stop_servers(self):
        """stop_servers() stops every server the driver is tracking."""
        call_info = {'stopped': []}
        class FakeRPCServer(object):
            def stop(self):
                call_info['stopped'].append(self)
        fake_servers = [FakeRPCServer() for x in range(5)]
        self.driver.rpc_servers = fake_servers
        self.driver.stop_servers()
        self.assertEqual(fake_servers, call_info['stopped'])
    def test_create_transport_once(self):
        """A Transport per transport_url is created once and then cached."""
        # should only construct each Transport once
        rpcapi = self.driver.intercell_rpcapi
        transport_url = 'amqp://fakeurl'
        next_hop = fakes.FakeCellState('cellname')
        next_hop.db_info['transport_url'] = transport_url
        # first call to _get_transport creates a oslo.messaging.Transport obj
        with mock.patch.object(oslo_messaging,
                               'get_rpc_transport') as get_trans:
            transport = rpcapi._get_transport(next_hop)
            get_trans.assert_called_once_with(rpc_driver.CONF, transport_url)
            self.assertIn(transport_url, rpcapi.transports)
            self.assertEqual(transport, rpcapi.transports[transport_url])
        # subsequent calls should return the pre-created Transport obj
        transport2 = rpcapi._get_transport(next_hop)
        self.assertEqual(transport, transport2)
    def test_send_message_to_cell_cast(self):
        """A non-fanout targeted message is cast to the next-hop cell."""
        msg_runner = fakes.get_message_runner('api-cell')
        cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
        message = messaging._TargetedMessage(msg_runner,
                self.ctxt, 'fake', {}, 'down', cell_state, fanout=False)
        expected_server_params = {'hostname': 'rpc_host2',
                                  'password': 'password2',
                                  'port': 3092,
                                  'username': 'username2',
                                  'virtual_host': 'rpc_vhost2'}
        expected_url = ('rabbit://%(username)s:%(password)s@'
                        '%(hostname)s:%(port)d/%(virtual_host)s' %
                        expected_server_params)
        rpcapi = self.driver.intercell_rpcapi
        rpcclient = mock.Mock()
        with mock.patch.object(rpcapi, '_get_client') as m_get_client:
            m_get_client.return_value = rpcclient
            self.driver.send_message_to_cell(cell_state, message)
            m_get_client.assert_called_with(cell_state,
                                            'cells.intercell.targeted')
            self.assertEqual(expected_url,
                             cell_state.db_info['transport_url'])
            rpcclient.cast.assert_called_with(mock.ANY,
                                              'process_message',
                                              message=message.to_json())
    def test_send_message_to_cell_fanout_cast(self):
        """A fanout message prepares the client with fanout=True first."""
        msg_runner = fakes.get_message_runner('api-cell')
        cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
        message = messaging._TargetedMessage(msg_runner,
                self.ctxt, 'fake', {}, 'down', cell_state, fanout=True)
        expected_server_params = {'hostname': 'rpc_host2',
                                  'password': 'password2',
                                  'port': 3092,
                                  'username': 'username2',
                                  'virtual_host': 'rpc_vhost2'}
        expected_url = ('rabbit://%(username)s:%(password)s@'
                        '%(hostname)s:%(port)d/%(virtual_host)s' %
                        expected_server_params)
        rpcapi = self.driver.intercell_rpcapi
        rpcclient = mock.Mock()
        with mock.patch.object(rpcapi, '_get_client') as m_get_client:
            m_get_client.return_value = rpcclient
            rpcclient.return_value = rpcclient
            rpcclient.prepare.return_value = rpcclient
            self.driver.send_message_to_cell(cell_state, message)
            m_get_client.assert_called_with(cell_state,
                                            'cells.intercell.targeted')
            self.assertEqual(expected_url,
                             cell_state.db_info['transport_url'])
            rpcclient.prepare.assert_called_with(fanout=True)
            rpcclient.cast.assert_called_with(mock.ANY,
                                              'process_message',
                                              message=message.to_json())
    def test_rpc_topic_uses_message_type(self):
        """The intercell topic is built from the queue base + message type."""
        self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
        msg_runner = fakes.get_message_runner('api-cell')
        cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
        message = messaging._BroadcastMessage(msg_runner,
                self.ctxt, 'fake', {}, 'down', fanout=True)
        message.message_type = 'fake-message-type'
        expected_server_params = {'hostname': 'rpc_host2',
                                  'password': 'password2',
                                  'port': 3092,
                                  'username': 'username2',
                                  'virtual_host': 'rpc_vhost2'}
        expected_url = ('rabbit://%(username)s:%(password)s@'
                        '%(hostname)s:%(port)d/%(virtual_host)s' %
                        expected_server_params)
        rpcapi = self.driver.intercell_rpcapi
        rpcclient = mock.Mock()
        with mock.patch.object(rpcapi, '_get_client') as m_get_client:
            m_get_client.return_value = rpcclient
            rpcclient.prepare(fanout=True)
            rpcclient.prepare.return_value = rpcclient
            self.driver.send_message_to_cell(cell_state, message)
            m_get_client.assert_called_with(cell_state,
                                            'cells.intercell42.fake-message-type')
            self.assertEqual(expected_url,
                             cell_state.db_info['transport_url'])
            rpcclient.prepare.assert_called_with(fanout=True)
            rpcclient.cast.assert_called_with(mock.ANY,
                                              'process_message',
                                              message=message.to_json())
    def test_process_message(self):
        """The dispatcher deserializes the message and invokes process()."""
        msg_runner = fakes.get_message_runner('api-cell')
        dispatcher = rpc_driver.InterCellRPCDispatcher(msg_runner)
        message = messaging._BroadcastMessage(msg_runner,
                self.ctxt, 'fake', {}, 'down', fanout=True)
        call_info = {}
        def _fake_message_from_json(json_message):
            call_info['json_message'] = json_message
            self.assertEqual(message.to_json(), json_message)
            return message
        def _fake_process():
            call_info['process_called'] = True
        msg_runner.message_from_json = _fake_message_from_json
        message.process = _fake_process
        dispatcher.process_message(self.ctxt, message.to_json())
        self.assertEqual(message.to_json(), call_info['json_message'])
        self.assertTrue(call_info['process_called'])
-734
View File
@@ -1,734 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells RPCAPI
"""
import mock
from oslo_utils.fixture import uuidsentinel as uuids
import six
from nova.cells import rpcapi as cells_rpcapi
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
class CellsAPITestCase(test.NoDBTestCase):
"""Test case for cells.api interfaces."""
    def setUp(self):
        """Enable cells and build a fresh CellsAPI client for each test."""
        super(CellsAPITestCase, self).setUp()
        self.fake_topic = 'cells'
        self.fake_context = 'fake_context'
        self.flags(enable=True, group='cells')
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
    def _stub_rpc_method(self, rpc_method, result):
        """Stub out the given RPC client method ('call' or 'cast').

        Returns a dict that captures the context, method name, kwargs and
        (when supplied via prepare()) the version of the outgoing RPC so
        tests can assert on them.  The stubbed method returns ``result``.
        """
        call_info = {}
        orig_prepare = self.cells_rpcapi.client.prepare
        def fake_rpc_prepare(self_rpcclient, **kwargs):
            # Record the requested version, but hand back the real client
            # so the chained call/cast still hits the stub below.
            if 'version' in kwargs:
                call_info['version'] = kwargs.pop('version')
            return self.cells_rpcapi.client
        def fake_csv(self, version):
            return orig_prepare(version).can_send_version()
        def fake_rpc_method(self, ctxt, method, **kwargs):
            call_info['context'] = ctxt
            call_info['method'] = method
            call_info['args'] = kwargs
            return result
        self.stub_out('oslo_messaging.rpc.client.RPCClient.prepare',
                      fake_rpc_prepare)
        self.stub_out('oslo_messaging.rpc.client.RPCClient.can_send_version',
                      fake_csv)
        self.stub_out('oslo_messaging.rpc.client.RPCClient.%s' % rpc_method,
                      fake_rpc_method)
        return call_info
def _check_result(self, call_info, method, args, version=None):
self.assertEqual(self.fake_topic,
self.cells_rpcapi.client.target.topic)
self.assertEqual(self.fake_context, call_info['context'])
self.assertEqual(method, call_info['method'])
self.assertEqual(args, call_info['args'])
if version is not None:
self.assertIn('version', call_info)
self.assertIsInstance(call_info['version'], six.string_types,
msg="Message version %s is not a string" %
call_info['version'])
self.assertEqual(version, call_info['version'])
else:
self.assertNotIn('version', call_info)
    def test_cast_compute_api_method(self):
        """cast_compute_api_method() casts run_compute_api_method."""
        fake_cell_name = 'fake_cell_name'
        fake_method = 'fake_method'
        fake_method_args = (1, 2)
        fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
        expected_method_info = {'method': fake_method,
                                'method_args': fake_method_args,
                                'method_kwargs': fake_method_kwargs}
        expected_args = {'method_info': expected_method_info,
                         'cell_name': fake_cell_name,
                         'call': False}
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.cast_compute_api_method(self.fake_context,
                fake_cell_name, fake_method,
                *fake_method_args, **fake_method_kwargs)
        self._check_result(call_info, 'run_compute_api_method',
                           expected_args)
    def test_call_compute_api_method(self):
        """call_compute_api_method() uses 'call' and returns the response."""
        fake_cell_name = 'fake_cell_name'
        fake_method = 'fake_method'
        fake_method_args = (1, 2)
        fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
        fake_response = 'fake_response'
        expected_method_info = {'method': fake_method,
                                'method_args': fake_method_args,
                                'method_kwargs': fake_method_kwargs}
        expected_args = {'method_info': expected_method_info,
                         'cell_name': fake_cell_name,
                         'call': True}
        call_info = self._stub_rpc_method('call', fake_response)
        result = self.cells_rpcapi.call_compute_api_method(self.fake_context,
                fake_cell_name, fake_method,
                *fake_method_args, **fake_method_kwargs)
        self._check_result(call_info, 'run_compute_api_method',
                           expected_args)
        self.assertEqual(fake_response, result)
    def test_build_instances(self):
        """build_instances() packs everything into build_inst_kwargs (1.34)."""
        call_info = self._stub_rpc_method('cast', None)
        instances = [objects.Instance(id=1),
                     objects.Instance(id=2)]
        self.cells_rpcapi.build_instances(
                self.fake_context, instances=instances,
                image={'fake': 'image'}, arg1=1, arg2=2, arg3=3)
        expected_args = {'build_inst_kwargs': {'instances': instances,
                                               'image': {'fake': 'image'},
                                               'arg1': 1,
                                               'arg2': 2,
                                               'arg3': 3}}
        self._check_result(call_info, 'build_instances',
                           expected_args, version='1.34')
    def test_get_capacities(self):
        """get_capacities() is a 'call' at version 1.9."""
        capacity_info = {"capacity": "info"}
        call_info = self._stub_rpc_method('call',
                                          result=capacity_info)
        result = self.cells_rpcapi.get_capacities(self.fake_context,
                                                  cell_name="name")
        self._check_result(call_info, 'get_capacities',
                           {'cell_name': 'name'}, version='1.9')
        self.assertEqual(capacity_info, result)
    def test_instance_delete_everywhere(self):
        """instance_delete_everywhere() casts at version 1.27."""
        instance = fake_instance.fake_instance_obj(self.fake_context)
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.instance_delete_everywhere(
                self.fake_context, instance,
                'fake-type')
        expected_args = {'instance': instance,
                         'delete_type': 'fake-type'}
        self._check_result(call_info, 'instance_delete_everywhere',
                           expected_args, version='1.27')
    def test_get_cell_info_for_neighbors(self):
        """get_cell_info_for_neighbors() calls with no args at 1.1."""
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.get_cell_info_for_neighbors(
                self.fake_context)
        self._check_result(call_info, 'get_cell_info_for_neighbors', {},
                           version='1.1')
        self.assertEqual('fake_response', result)
    def test_sync_instances(self):
        """sync_instances() casts the sync filters at version 1.1."""
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.sync_instances(self.fake_context,
                project_id='fake_project', updated_since='fake_time',
                deleted=True)
        expected_args = {'project_id': 'fake_project',
                         'updated_since': 'fake_time',
                         'deleted': True}
        self._check_result(call_info, 'sync_instances', expected_args,
                           version='1.1')
    def test_service_get_all(self):
        """service_get_all() calls with the filters dict at version 1.2."""
        call_info = self._stub_rpc_method('call', 'fake_response')
        fake_filters = {'key1': 'val1', 'key2': 'val2'}
        result = self.cells_rpcapi.service_get_all(self.fake_context,
                filters=fake_filters)
        expected_args = {'filters': fake_filters}
        self._check_result(call_info, 'service_get_all', expected_args,
                           version='1.2')
        self.assertEqual('fake_response', result)
def test_service_get_by_compute_host(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.service_get_by_compute_host(
self.fake_context, host_name='fake-host-name')
expected_args = {'host_name': 'fake-host-name'}
self._check_result(call_info, 'service_get_by_compute_host',
expected_args,
version='1.2')
self.assertEqual('fake_response', result)
def test_get_host_uptime(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.get_host_uptime(
self.fake_context, host_name='fake-host-name')
expected_args = {'host_name': 'fake-host-name'}
self._check_result(call_info, 'get_host_uptime',
expected_args,
version='1.17')
self.assertEqual('fake_response', result)
    def test_service_update(self):
        """service_update is a 'call' at 1.7 carrying host, binary and the
        dict of fields to change.
        """
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.service_update(
            self.fake_context, host_name='fake-host-name',
            binary='nova-api', params_to_update={'disabled': True})
        expected_args = {
            'host_name': 'fake-host-name',
            'binary': 'nova-api',
            'params_to_update': {'disabled': True}}
        self._check_result(call_info, 'service_update',
                           expected_args,
                           version='1.7')
        self.assertEqual('fake_response', result)
def test_service_delete(self):
call_info = self._stub_rpc_method('call', None)
cell_service_id = 'cell@id'
result = self.cells_rpcapi.service_delete(
self.fake_context, cell_service_id=cell_service_id)
expected_args = {'cell_service_id': cell_service_id}
self._check_result(call_info, 'service_delete',
expected_args, version='1.26')
self.assertIsNone(result)
    def test_proxy_rpc_to_manager(self):
        """Arbitrary RPC messages are proxied via 'call' at version 1.2."""
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.proxy_rpc_to_manager(
            self.fake_context, rpc_message='fake-msg',
            topic='fake-topic', call=True)
        expected_args = {'rpc_message': 'fake-msg',
                         'topic': 'fake-topic',
                         'call': True}
        self._check_result(call_info, 'proxy_rpc_to_manager',
                           expected_args,
                           version='1.2')
        self.assertEqual('fake_response', result)
    def test_task_log_get_all(self):
        """Task log queries go over 'call' at 1.3 with every filter kwarg
        forwarded verbatim.
        """
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.task_log_get_all(self.fake_context,
                task_name='fake_name',
                period_beginning='fake_begin',
                period_ending='fake_end',
                host='fake_host',
                state='fake_state')
        expected_args = {'task_name': 'fake_name',
                         'period_beginning': 'fake_begin',
                         'period_ending': 'fake_end',
                         'host': 'fake_host',
                         'state': 'fake_state'}
        self._check_result(call_info, 'task_log_get_all', expected_args,
                           version='1.3')
        self.assertEqual('fake_response', result)
def test_compute_node_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_get_all(self.fake_context,
hypervisor_match='fake-match')
expected_args = {'hypervisor_match': 'fake-match'}
self._check_result(call_info, 'compute_node_get_all', expected_args,
version='1.4')
self.assertEqual('fake_response', result)
def test_compute_node_stats(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_stats(self.fake_context)
expected_args = {}
self._check_result(call_info, 'compute_node_stats',
expected_args, version='1.4')
self.assertEqual('fake_response', result)
def test_compute_node_get(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_get(self.fake_context,
'fake_compute_id')
expected_args = {'compute_id': 'fake_compute_id'}
self._check_result(call_info, 'compute_node_get',
expected_args, version='1.4')
self.assertEqual('fake_response', result)
    def test_compute_node_get_too_old(self):
        """Tests that ComputeHostNotFound is raised if passed a compute node
        uuid but we can't send it on the 1.38 version.
        """
        # Force version negotiation to fail so the uuid cannot be sent.
        with mock.patch.object(self.cells_rpcapi.client, 'can_send_version',
                               return_value=False) as can_send_version_mock:
            self.assertRaises(exception.ComputeHostNotFound,
                              self.cells_rpcapi.compute_node_get,
                              self.fake_context, uuids.compute_id)
        can_send_version_mock.assert_called_once_with('1.38')
    def test_compute_node_get_with_uuid(self):
        """Tests that we send a message at 1.38 if the compute node uuid is
        passed in and the version check passes.
        """
        fake_compute_node = objects.ComputeNode(uuid=uuids.compute_id)
        call_info = self._stub_rpc_method('call', fake_compute_node)
        self.assertEqual(fake_compute_node,
                         self.cells_rpcapi.compute_node_get(
                             self.fake_context, uuids.compute_id))
        # The uuid travels in the same 'compute_id' slot as an integer id.
        args = dict(compute_id=uuids.compute_id)
        self._check_result(call_info, 'compute_node_get', args, version='1.38')
    def test_actions_get(self):
        """actions_get routes on the instance's cell_name at version 1.5."""
        fake_instance = {'uuid': uuids.instance, 'cell_name': 'region!child'}
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.actions_get(self.fake_context,
                                               fake_instance)
        expected_args = {'cell_name': 'region!child',
                         'instance_uuid': fake_instance['uuid']}
        self._check_result(call_info, 'actions_get', expected_args,
                           version='1.5')
        self.assertEqual('fake_response', result)
def test_actions_get_no_cell(self):
fake_instance = {'uuid': uuids.instance, 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.actions_get, self.fake_context,
fake_instance)
    def test_action_get_by_request_id(self):
        """Action lookup by request id is a 'call' at 1.5, routed on the
        instance's cell_name.
        """
        fake_instance = {'uuid': uuids.instance, 'cell_name': 'region!child'}
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.action_get_by_request_id(self.fake_context,
                                                            fake_instance,
                                                            'req-fake')
        expected_args = {'cell_name': 'region!child',
                         'instance_uuid': fake_instance['uuid'],
                         'request_id': 'req-fake'}
        self._check_result(call_info, 'action_get_by_request_id',
                           expected_args, version='1.5')
        self.assertEqual('fake_response', result)
def test_action_get_by_request_id_no_cell(self):
fake_instance = {'uuid': uuids.instance, 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.action_get_by_request_id,
self.fake_context, fake_instance, 'req-fake')
    def test_action_events_get(self):
        """Action events are fetched via 'call' at 1.5; only the cell name
        and action id cross the wire, not the instance uuid.
        """
        fake_instance = {'uuid': uuids.instance, 'cell_name': 'region!child'}
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.action_events_get(self.fake_context,
                                                     fake_instance,
                                                     'fake-action')
        expected_args = {'cell_name': 'region!child',
                         'action_id': 'fake-action'}
        self._check_result(call_info, 'action_events_get', expected_args,
                           version='1.5')
        self.assertEqual('fake_response', result)
def test_action_events_get_no_cell(self):
fake_instance = {'uuid': uuids.instance, 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.action_events_get,
self.fake_context, fake_instance, 'fake-action')
def test_consoleauth_delete_tokens(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.consoleauth_delete_tokens(self.fake_context,
uuids.instance)
expected_args = {'instance_uuid': uuids.instance}
self._check_result(call_info, 'consoleauth_delete_tokens',
expected_args, version='1.6')
def test_validate_console_port(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.validate_console_port(self.fake_context,
uuids.instance, 'fake-port', 'fake-type')
expected_args = {'instance_uuid': uuids.instance,
'console_port': 'fake-port',
'console_type': 'fake-type'}
self._check_result(call_info, 'validate_console_port',
expected_args, version='1.6')
self.assertEqual('fake_response', result)
def test_get_migrations(self):
call_info = self._stub_rpc_method('call', None)
filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
self.cells_rpcapi.get_migrations(self.fake_context, filters)
expected_args = {'filters': filters}
self._check_result(call_info, 'get_migrations', expected_args,
version="1.11")
def test_start_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.start_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'start_instance',
expected_args, version='1.12')
    def test_stop_instance_cast(self):
        """With do_cast=True, stop_instance goes out as a 'cast' at 1.31."""
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.stop_instance(
            self.fake_context, 'fake-instance', do_cast=True,
            clean_shutdown=True)
        expected_args = {'instance': 'fake-instance',
                         'do_cast': True,
                         'clean_shutdown': True}
        self._check_result(call_info, 'stop_instance',
                           expected_args, version='1.31')
    def test_stop_instance_call(self):
        """With do_cast=False, stop_instance is a 'call' at 1.31 and the
        response is returned to the caller.
        """
        call_info = self._stub_rpc_method('call', 'fake_response')
        result = self.cells_rpcapi.stop_instance(
            self.fake_context, 'fake-instance', do_cast=False,
            clean_shutdown=True)
        expected_args = {'instance': 'fake-instance',
                         'do_cast': False,
                         'clean_shutdown': True}
        self._check_result(call_info, 'stop_instance',
                           expected_args, version='1.31')
        self.assertEqual('fake_response', result)
def test_cell_create(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_create(self.fake_context, 'values')
expected_args = {'values': 'values'}
self._check_result(call_info, 'cell_create',
expected_args, version='1.13')
self.assertEqual('fake_response', result)
def test_cell_update(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_update(self.fake_context,
'cell_name', 'values')
expected_args = {'cell_name': 'cell_name',
'values': 'values'}
self._check_result(call_info, 'cell_update',
expected_args, version='1.13')
self.assertEqual('fake_response', result)
def test_cell_delete(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_delete(self.fake_context,
'cell_name')
expected_args = {'cell_name': 'cell_name'}
self._check_result(call_info, 'cell_delete',
expected_args, version='1.13')
self.assertEqual('fake_response', result)
def test_cell_get(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_get(self.fake_context,
'cell_name')
expected_args = {'cell_name': 'cell_name'}
self._check_result(call_info, 'cell_get',
expected_args, version='1.13')
self.assertEqual('fake_response', result)
def test_reboot_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.reboot_instance(
self.fake_context, 'fake-instance',
block_device_info='ignored', reboot_type='HARD')
expected_args = {'instance': 'fake-instance',
'reboot_type': 'HARD'}
self._check_result(call_info, 'reboot_instance',
expected_args, version='1.14')
def test_pause_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.pause_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'pause_instance',
expected_args, version='1.19')
def test_unpause_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.unpause_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'unpause_instance',
expected_args, version='1.19')
def test_suspend_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.suspend_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'suspend_instance',
expected_args, version='1.15')
def test_resume_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.resume_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'resume_instance',
expected_args, version='1.15')
def test_terminate_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.terminate_instance(self.fake_context,
'fake-instance', [],
delete_type='delete')
expected_args = {'instance': 'fake-instance',
'delete_type': 'delete'}
self._check_result(call_info, 'terminate_instance',
expected_args, version='1.36')
def test_soft_delete_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.soft_delete_instance(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'soft_delete_instance',
expected_args, version='1.18')
    def test_resize_instance(self):
        """resize_instance is cast at 1.33; scheduler hint and reservations
        stay local and are not forwarded over the cells RPC.
        """
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.resize_instance(self.fake_context,
                                          'fake-instance',
                                          dict(cow='moo'),
                                          'fake-hint',
                                          'fake-flavor',
                                          'fake-reservations',
                                          clean_shutdown=True)
        expected_args = {'instance': 'fake-instance',
                         'flavor': 'fake-flavor',
                         'extra_instance_updates': dict(cow='moo'),
                         'clean_shutdown': True}
        self._check_result(call_info, 'resize_instance',
                           expected_args, version='1.33')
    def test_resize_instance_not_passing_request_spec(self):
        """A request_spec kwarg is accepted by the API but is NOT included
        in the message sent over the cells RPC.
        """
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.resize_instance(self.fake_context,
                                          'fake-instance',
                                          dict(cow='moo'),
                                          'fake-hint',
                                          'fake-flavor',
                                          'fake-reservations',
                                          clean_shutdown=True,
                                          request_spec='fake-spec')
        # request_spec deliberately absent from the expected wire args.
        expected_args = {'instance': 'fake-instance',
                         'flavor': 'fake-flavor',
                         'extra_instance_updates': dict(cow='moo'),
                         'clean_shutdown': True}
        self._check_result(call_info, 'resize_instance',
                           expected_args, version='1.33')
    def test_live_migrate_instance(self):
        """live_migrate_instance is cast at 1.20 with host and migration
        flags.
        """
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.live_migrate_instance(self.fake_context,
                                                'fake-instance',
                                                'fake-host',
                                                'fake-block',
                                                'fake-commit')
        expected_args = {'instance': 'fake-instance',
                         'block_migration': 'fake-block',
                         'disk_over_commit': 'fake-commit',
                         'host_name': 'fake-host'}
        self._check_result(call_info, 'live_migrate_instance',
                           expected_args, version='1.20')
    def test_live_migrate_instance_not_passing_request_spec(self):
        """A positional request spec is accepted but is NOT included in the
        message sent over the cells RPC.
        """
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.live_migrate_instance(self.fake_context,
                                                'fake-instance',
                                                'fake-host',
                                                'fake-block',
                                                'fake-commit',
                                                'fake-spec')
        # The spec deliberately does not appear in the wire args.
        expected_args = {'instance': 'fake-instance',
                         'block_migration': 'fake-block',
                         'disk_over_commit': 'fake-commit',
                         'host_name': 'fake-host'}
        self._check_result(call_info, 'live_migrate_instance',
                           expected_args, version='1.20')
    def test_rebuild_instance_not_passing_request_spec(self):
        """rebuild_instance is cast at 1.25; only a subset of the rebuild
        arguments (and never the request spec) travel over the cells RPC.
        """
        call_info = self._stub_rpc_method('cast', None)
        self.cells_rpcapi.rebuild_instance(self.fake_context,
                                           'fake-instance',
                                           'fake-pass',
                                           'fake-files',
                                           'fake-image_ref',
                                           'fake-orig_image_ref',
                                           'fake-orig_sys_metadata',
                                           'fake-bdms',
                                           recreate=False,
                                           on_shared_storage=False,
                                           host=None,
                                           preserve_ephemeral=False,
                                           request_spec='fake-spec',
                                           kwargs=None)
        # Note: orig image ref, sys metadata, bdms, recreate, shared-storage
        # and host are all dropped before the message goes on the wire.
        expected_args = {'instance': 'fake-instance',
                         'image_href': 'fake-image_ref',
                         'admin_password': 'fake-pass',
                         'files_to_inject': 'fake-files',
                         'preserve_ephemeral': False,
                         'kwargs': None}
        self._check_result(call_info, 'rebuild_instance',
                           expected_args, version='1.25')
def test_revert_resize(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.revert_resize(self.fake_context,
'fake-instance',
'fake-migration',
'fake-dest',
'resvs')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'revert_resize',
expected_args, version='1.21')
def test_confirm_resize(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.confirm_resize(self.fake_context,
'fake-instance',
'fake-migration',
'fake-source',
'resvs')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'confirm_resize',
expected_args, version='1.21')
def test_reset_network(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.reset_network(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'reset_network',
expected_args, version='1.22')
def test_inject_network_info(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.inject_network_info(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'inject_network_info',
expected_args, version='1.23')
def test_snapshot_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.snapshot_instance(self.fake_context,
'fake-instance',
'image-id')
expected_args = {'instance': 'fake-instance',
'image_id': 'image-id'}
self._check_result(call_info, 'snapshot_instance',
expected_args, version='1.24')
def test_backup_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.backup_instance(self.fake_context,
'fake-instance',
'image-id',
'backup-type',
'rotation')
expected_args = {'instance': 'fake-instance',
'image_id': 'image-id',
'backup_type': 'backup-type',
'rotation': 'rotation'}
self._check_result(call_info, 'backup_instance',
expected_args, version='1.24')
def test_set_admin_password(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.set_admin_password(self.fake_context,
'fake-instance', 'fake-password')
expected_args = {'instance': 'fake-instance',
'new_pass': 'fake-password'}
self._check_result(call_info, 'set_admin_password',
expected_args, version='1.29')
@@ -1,474 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellsScheduler
"""
import copy
import mock
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
from nova import block_device
from nova.cells import filters
from nova.cells import weights
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.cells import fakes
from nova.tests.unit import fake_block_device
class FakeFilterClass1(filters.BaseCellFilter):
    """No-op filter used only to verify filter wiring by class identity."""
class FakeFilterClass2(filters.BaseCellFilter):
    """Second no-op filter so tests can assert ordering of filter classes."""
class FakeWeightClass1(weights.BaseCellWeigher):
    """Weigher stub whose per-cell weight is always None."""

    def _weigh_object(self, obj, weight_properties):
        """Return None so every cell weighs the same."""
        return None
class FakeWeightClass2(weights.BaseCellWeigher):
    """Second weigher stub so tests can assert ordering of weigher classes."""

    def _weigh_object(self, obj, weight_properties):
        """Return None so every cell weighs the same."""
        return None
class CellsSchedulerTestCase(test.TestCase):
    """Test case for CellsScheduler class."""
    def setUp(self):
        """Start every test with empty filter/weight class lists."""
        super(CellsSchedulerTestCase, self).setUp()
        self.flags(scheduler_filter_classes=[], scheduler_weight_classes=[],
                   group='cells')
        self._init_cells_scheduler()
    def _init_cells_scheduler(self):
        """(Re)build the fake cells environment and canned build kwargs.

        Called again by tests that need to re-read the cells config (e.g.
        after self.flags() changes the filter/weight classes).
        """
        fakes.init(self)
        self.msg_runner = fakes.get_message_runner('api-cell')
        self.scheduler = self.msg_runner.scheduler
        self.state_manager = self.msg_runner.state_manager
        self.my_cell_state = self.state_manager.get_my_state()
        self.ctxt = context.RequestContext('fake', 'fake')
        instance_uuids = []
        for x in range(3):
            instance_uuids.append(uuidutils.generate_uuid())
        self.instance_uuids = instance_uuids
        self.instances = [objects.Instance(uuid=uuid, id=id)
                          for id, uuid in enumerate(instance_uuids)]
        self.request_spec = {
                'num_instances': len(instance_uuids),
                'instance_properties': self.instances[0],
                'instance_type': 'fake_type',
                'image': 'fake_image'}
        self.build_inst_kwargs = {
                'instances': self.instances,
                'image': 'fake_image',
                'filter_properties': {'instance_type': 'fake_type'},
                'security_groups': 'fake_sec_groups',
                'block_device_mapping': 'fake_bdm'}
    @mock.patch('nova.objects.Instance.update')
    def test_create_instances_here_pops_problematic_properties(self,
                                                               mock_update):
        """RPC-unsafe instance fields must be stripped before Instance.update.
        """
        values = {
            'uuid': uuidsentinel.instance,
            'metadata': [],
            'id': 1,
            'name': 'foo',
            'info_cache': 'bar',
            'security_groups': 'not secure',
            'flavor': 'chocolate',
            'pci_requests': 'no thanks',
            'ec2_ids': 'prime',
        }
        block_device_mapping = [
            objects.BlockDeviceMapping(context=self.ctxt,
                **fake_block_device.FakeDbBlockDeviceDict(
                    block_device.create_image_bdm(
                        uuidsentinel.fake_image_ref),
                    anon=True))
        ]
        @mock.patch.object(self.scheduler.compute_api,
                           'create_db_entry_for_new_instance')
        @mock.patch.object(self.scheduler.compute_api,
                           '_bdm_validate_set_size_and_instance')
        def test(mock_bdm_validate, mock_create_db):
            self.scheduler._create_instances_here(
                self.ctxt, [uuidsentinel.instance], values,
                objects.Flavor(), 'foo', [], block_device_mapping)
        test()
        # NOTE(danms): Make sure that only the expected properties
        # are applied to the instance object. The complex ones that
        # would have been mangled over RPC should be removed.
        mock_update.assert_called_once_with(
            {'uuid': uuidsentinel.instance,
             'metadata': {}})
    def test_build_instances_selects_child_cell(self):
        """With no local capacity info, scheduling picks a child cell."""
        # Make sure there's no capacity info so we're sure to
        # select a child cell
        our_cell_info = self.state_manager.get_my_state()
        our_cell_info.capacities = {}
        call_info = {'times': 0}
        orig_fn = self.msg_runner.build_instances
        def msg_runner_build_instances(self_mr, ctxt, target_cell,
                                       build_inst_kwargs):
            # This gets called twice. Once for our running it
            # in this cell.. and then it'll get called when the
            # child cell is picked. So, first time.. just run it
            # like normal.
            if not call_info['times']:
                call_info['times'] += 1
                # orig_fn is already bound to msg_runner; no self needed.
                return orig_fn(ctxt, target_cell, build_inst_kwargs)
            call_info['ctxt'] = ctxt
            call_info['target_cell'] = target_cell
            call_info['build_inst_kwargs'] = build_inst_kwargs
        def fake_build_request_spec(image, instances):
            request_spec = {
                    'num_instances': len(instances),
                    'image': image}
            return request_spec
        self.stub_out('nova.cells.messaging.MessageRunner.build_instances',
                      msg_runner_build_instances)
        self.stub_out('nova.scheduler.utils.build_request_spec',
                      fake_build_request_spec)
        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                                        self.build_inst_kwargs)
        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.build_inst_kwargs,
                         call_info['build_inst_kwargs'])
        child_cells = self.state_manager.get_child_cells()
        self.assertIn(call_info['target_cell'], child_cells)
    def test_build_instances_selects_current_cell(self):
        """With no child cells, the build lands in this (current) cell."""
        self.flags(scheduler='nova.cells.scheduler.CellsScheduler',
                   group='cells')
        # Make sure there's no child cells so that we will be
        # selected
        self.state_manager.child_cells = {}
        call_info = {}
        build_inst_kwargs = copy.deepcopy(self.build_inst_kwargs)
        def fake_create_instances_here(self_cs, ctxt, instance_uuids,
                instance_properties, instance_type, image, security_groups,
                block_device_mapping):
            call_info['ctxt'] = ctxt
            call_info['instance_uuids'] = instance_uuids
            call_info['instance_properties'] = instance_properties
            call_info['instance_type'] = instance_type
            call_info['image'] = image
            call_info['security_groups'] = security_groups
            call_info['block_device_mapping'] = block_device_mapping
            return self.instances
        def fake_rpc_build_instances(self, ctxt, **build_inst_kwargs):
            call_info['build_inst_kwargs'] = build_inst_kwargs
        def fake_build_request_spec(image, instances):
            request_spec = {
                    'num_instances': len(instances),
                    'image': image}
            return request_spec
        self.stub_out('nova.cells.scheduler.CellsScheduler.'
                      '_create_instances_here',
                      fake_create_instances_here)
        self.stub_out('nova.conductor.api.ComputeTaskAPI.'
                      'build_instances', fake_rpc_build_instances)
        self.stub_out('nova.scheduler.utils.build_request_spec',
                      fake_build_request_spec)
        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                                        build_inst_kwargs)
        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
        self.assertEqual(self.build_inst_kwargs['instances'][0]['id'],
                         call_info['instance_properties']['id'])
        self.assertEqual(
            self.build_inst_kwargs['filter_properties']['instance_type'],
            call_info['instance_type'])
        self.assertEqual(self.build_inst_kwargs['image'], call_info['image'])
        self.assertEqual(self.build_inst_kwargs['security_groups'],
                         call_info['security_groups'])
        self.assertEqual(self.build_inst_kwargs['block_device_mapping'],
                         call_info['block_device_mapping'])
        self.assertEqual(build_inst_kwargs,
                         call_info['build_inst_kwargs'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
    def test_build_instances_retries_when_no_cells_avail(self):
        """NoCellsAvailable is retried scheduler_retries times, then the
        instances are put into ERROR.
        """
        self.flags(scheduler='nova.cells.scheduler.CellsScheduler',
                   scheduler_retries=7, group='cells')
        call_info = {'num_tries': 0, 'errored_uuids': []}
        def fake_grab_target_cells(self, filter_properties):
            call_info['num_tries'] += 1
            raise exception.NoCellsAvailable()
        def fake_sleep(_secs):
            return
        def fake_instance_save(inst):
            self.assertEqual(vm_states.ERROR, inst.vm_state)
            call_info['errored_uuids'].append(inst.uuid)
        def fake_build_request_spec(image, instances):
            request_spec = {
                    'num_instances': len(instances),
                    'image': image}
            return request_spec
        self.stub_out('nova.cells.scheduler.CellsScheduler._grab_target_cells',
                      fake_grab_target_cells)
        self.stub_out('time.sleep', fake_sleep)
        self.stub_out('nova.objects.Instance.save', fake_instance_save)
        self.stub_out('nova.scheduler.utils.build_request_spec',
                      fake_build_request_spec)
        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                                        self.build_inst_kwargs)
        # 1 initial attempt + 7 retries.
        self.assertEqual(8, call_info['num_tries'])
        self.assertEqual(self.instance_uuids, call_info['errored_uuids'])
    def test_filter_schedule_skipping(self):
        """A filter that returns no cells short-circuits scheduling."""
        # if a filter handles scheduling, short circuit
        mock_func = mock.Mock()
        self.scheduler._grab_target_cells = mock.Mock(return_value=None)
        self.scheduler._schedule_build_to_cells(None, None, None,
                                                mock_func, None)
        mock_func.assert_not_called()
    def test_cells_filter_args_correct(self):
        """Configured filters receive the expected objects/cells/properties.
        """
        # Re-init our fakes with some filters.
        our_path = 'nova.tests.unit.cells.test_cells_scheduler'
        cls_names = [our_path + '.' + 'FakeFilterClass1',
                     our_path + '.' + 'FakeFilterClass2']
        self.flags(scheduler='nova.cells.scheduler.CellsScheduler',
                   scheduler_filter_classes=cls_names, group='cells')
        self._init_cells_scheduler()
        # Make sure there's no child cells so that we will be
        # selected. Makes stubbing easier.
        self.state_manager.child_cells = {}
        call_info = {}
        def fake_create_instances_here(self_cs, ctxt, instance_uuids,
                instance_properties, instance_type, image, security_groups,
                block_device_mapping):
            call_info['ctxt'] = ctxt
            call_info['instance_uuids'] = instance_uuids
            call_info['instance_properties'] = instance_properties
            call_info['instance_type'] = instance_type
            call_info['image'] = image
            call_info['security_groups'] = security_groups
            call_info['block_device_mapping'] = block_device_mapping
        def fake_rpc_build_instances(self, ctxt, **host_sched_kwargs):
            call_info['host_sched_kwargs'] = host_sched_kwargs
        def fake_get_filtered_objs(self, filters, cells, filt_properties):
            call_info['filt_objects'] = filters
            call_info['filt_cells'] = cells
            call_info['filt_props'] = filt_properties
            return cells
        def fake_build_request_spec(image, instances):
            request_spec = {
                    'num_instances': len(instances),
                    'instance_properties': instances[0],
                    'image': image,
                    'instance_type': 'fake_type'}
            return request_spec
        self.stub_out('nova.cells.scheduler.CellsScheduler.'
                      '_create_instances_here',
                      fake_create_instances_here)
        self.stub_out('nova.conductor.api.ComputeTaskAPI.'
                      'build_instances', fake_rpc_build_instances)
        self.stub_out('nova.scheduler.utils.build_request_spec',
                      fake_build_request_spec)
        self.stub_out('nova.cells.filters.CellFilterHandler.'
                      'get_filtered_objects',
                      fake_get_filtered_objs)
        host_sched_kwargs = {'image': 'fake_image',
                             'instances': self.instances,
                             'filter_properties':
                                 {'instance_type': 'fake_type'},
                             'security_groups': 'fake_sec_groups',
                             'block_device_mapping': 'fake_bdm'}
        self.msg_runner.build_instances(self.ctxt,
                self.my_cell_state, host_sched_kwargs)
        # Our cell was selected.
        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
        self.assertEqual(self.request_spec['instance_properties']['id'],
                         call_info['instance_properties']['id'])
        self.assertEqual(self.request_spec['instance_type'],
                         call_info['instance_type'])
        self.assertEqual(self.request_spec['image'], call_info['image'])
        self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
        # Filter args are correct
        expected_filt_props = {'context': self.ctxt,
                               'scheduler': self.scheduler,
                               'routing_path': self.my_cell_state.name,
                               'host_sched_kwargs': host_sched_kwargs,
                               'request_spec': self.request_spec,
                               'instance_type': 'fake_type'}
        self.assertEqual(expected_filt_props, call_info['filt_props'])
        self.assertEqual([FakeFilterClass1, FakeFilterClass2],
                         [obj.__class__ for obj in call_info['filt_objects']])
        self.assertEqual([self.my_cell_state], call_info['filt_cells'])
    def test_cells_filter_returning_none(self):
        """If the filter handler returns None, no scheduling happens."""
        # Re-init our fakes with some filters.
        our_path = 'nova.tests.unit.cells.test_cells_scheduler'
        cls_names = [our_path + '.' + 'FakeFilterClass1',
                     our_path + '.' + 'FakeFilterClass2']
        self.flags(scheduler='nova.cells.scheduler.CellsScheduler',
                   scheduler_filter_classes=cls_names, group='cells')
        self._init_cells_scheduler()
        # Make sure there's no child cells so that we will be
        # selected. Makes stubbing easier.
        self.state_manager.child_cells = {}
        call_info = {'scheduled': False}
        def fake_create_instances_here(self, ctxt, request_spec):
            # Should not be called
            call_info['scheduled'] = True
        def fake_get_filtered_objs(filter_classes, cells, filt_properties):
            # Should cause scheduling to be skipped. Means that the
            # filter did it.
            return None
        self.stub_out('nova.cells.scheduler.CellsScheduler.'
                      '_create_instances_here',
                      fake_create_instances_here)
        self.stub_out('nova.cells.filters.CellFilterHandler.'
                      'get_filtered_objects',
                      fake_get_filtered_objs)
        self.msg_runner.build_instances(self.ctxt,
                self.my_cell_state, {})
        self.assertFalse(call_info['scheduled'])
    def test_cells_weight_args_correct(self):
        """Configured weighers receive the expected objects/cells/properties.
        """
        # Re-init our fakes with some filters.
        our_path = 'nova.tests.unit.cells.test_cells_scheduler'
        cls_names = [our_path + '.' + 'FakeWeightClass1',
                     our_path + '.' + 'FakeWeightClass2']
        self.flags(scheduler='nova.cells.scheduler.CellsScheduler',
                   scheduler_weight_classes=cls_names, group='cells')
        self._init_cells_scheduler()
        # Make sure there's no child cells so that we will be
        # selected. Makes stubbing easier.
        self.state_manager.child_cells = {}
        call_info = {}
        def fake_create_instances_here(self_cs, ctxt, instance_uuids,
                instance_properties, instance_type, image, security_groups,
                block_device_mapping):
            call_info['ctxt'] = ctxt
            call_info['instance_uuids'] = instance_uuids
            call_info['instance_properties'] = instance_properties
            call_info['instance_type'] = instance_type
            call_info['image'] = image
            call_info['security_groups'] = security_groups
            call_info['block_device_mapping'] = block_device_mapping
        def fake_rpc_build_instances(self, ctxt, **host_sched_kwargs):
            call_info['host_sched_kwargs'] = host_sched_kwargs
        def fake_get_weighed_objs(self, weighers, cells, filt_properties):
            call_info['weighers'] = weighers
            call_info['weight_cells'] = cells
            call_info['weight_props'] = filt_properties
            return [weights.WeightedCell(cells[0], 0.0)]
        def fake_build_request_spec(image, instances):
            request_spec = {
                    'num_instances': len(instances),
                    'instance_properties': instances[0],
                    'image': image,
                    'instance_type': 'fake_type'}
            return request_spec
        self.stub_out('nova.cells.scheduler.CellsScheduler.'
                      '_create_instances_here',
                      fake_create_instances_here)
        self.stub_out('nova.scheduler.utils.build_request_spec',
                      fake_build_request_spec)
        self.stub_out('nova.conductor.api.ComputeTaskAPI.'
                      'build_instances', fake_rpc_build_instances)
        self.stub_out('nova.cells.weights.CellWeightHandler.'
                      'get_weighed_objects',
                      fake_get_weighed_objs)
        host_sched_kwargs = {'image': 'fake_image',
                             'instances': self.instances,
                             'filter_properties':
                                 {'instance_type': 'fake_type'},
                             'security_groups': 'fake_sec_groups',
                             'block_device_mapping': 'fake_bdm'}
        self.msg_runner.build_instances(self.ctxt,
                self.my_cell_state, host_sched_kwargs)
        # Our cell was selected.
        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
        self.assertEqual(self.request_spec['instance_properties']['id'],
                         call_info['instance_properties']['id'])
        self.assertEqual(self.request_spec['instance_type'],
                         call_info['instance_type'])
        self.assertEqual(self.request_spec['image'], call_info['image'])
        self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
        # Weight args are correct
        expected_filt_props = {'context': self.ctxt,
                               'scheduler': self.scheduler,
                               'routing_path': self.my_cell_state.name,
                               'host_sched_kwargs': host_sched_kwargs,
                               'request_spec': self.request_spec,
                               'instance_type': 'fake_type'}
        self.assertEqual(expected_filt_props, call_info['weight_props'])
        self.assertEqual([FakeWeightClass1, FakeWeightClass2],
                         [obj.__class__ for obj in call_info['weighers']])
        self.assertEqual([self.my_cell_state], call_info['weight_cells'])
@@ -1,380 +0,0 @@
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellStateManager
"""
import datetime
import time
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import timeutils
import six
from nova.cells import state
from nova.db.sqlalchemy import models
from nova import exception
from nova import objects
from nova import test
from nova import utils
# Fake compute nodes as (host, total_mem_mb, total_disk_gb, free_mem_mb,
# free_disk_gb).  The negative free values on host2 exercise the
# max(0, ...) clamping done in the capacity tests below.
FAKE_COMPUTES = [
    ('host1', 1024, 100, 0, 0),
    ('host2', 1024, 100, -1, -1),
    ('host3', 1024, 100, 1024, 100),
    ('host4', 1024, 100, 300, 30),
]

# Same tuple shape as FAKE_COMPUTES, but with two nodes per host to model
# an N-nodes-to-one-host deployment (used by TestCellsStateManagerNToOne).
FAKE_COMPUTES_N_TO_ONE = [
    ('host1', 1024, 100, 0, 0),
    ('host1', 1024, 100, -1, -1),
    ('host2', 1024, 100, 1024, 100),
    ('host2', 1024, 100, 300, 30),
]

# (host, seconds_since_last_update).  host4's 3600s entry is treated as a
# downed node by TestCellsStateManagerNodeDown — presumably past the
# service-down threshold; confirm against the service_down_time config.
FAKE_SERVICES = [
    ('host1', 0),
    ('host2', 0),
    ('host3', 0),
    ('host4', 3600),
]

# NOTE(alaski): It's important to have multiple types that end up having the
# same memory and disk requirements. So two types need the same first value,
# and two need the second and third values to add up to the same thing.
# Tuples are (memory_mb, root_gb, ephemeral_gb).
FAKE_ITYPES = [
    (0, 0, 0),
    (50, 12, 13),
    (50, 2, 4),
    (10, 20, 5),
]
def _create_fake_node(host, total_mem, total_disk, free_mem, free_disk):
    """Build a ComputeNode object from one FAKE_COMPUTES-style tuple."""
    attrs = {
        'host': host,
        'memory_mb': total_mem,
        'local_gb': total_disk,
        'free_ram_mb': free_mem,
        'free_disk_gb': free_disk,
    }
    return objects.ComputeNode(**attrs)
@classmethod
def _fake_service_get_all_by_binary(cls, context, binary):
    """Stub for ServiceList.get_by_binary: every fake compute is healthy.

    Only the host from each FAKE_COMPUTES tuple is used; each service
    reports as enabled, not forced down, and seen just now.
    """
    def _service_for(host, *_unused):
        return objects.Service(host=host,
                               disabled=False,
                               forced_down=False,
                               last_seen_up=timeutils.utcnow())

    return [_service_for(*spec) for spec in FAKE_COMPUTES]
@classmethod
def _fake_service_get_all_by_binary_nodedown(cls, context, binary):
    """Stub for ServiceList.get_by_binary where some services are stale.

    Each FAKE_SERVICES tuple gives the number of seconds since the
    service was last seen; the returned Service objects carry that age
    in last_seen_up.
    """
    def _stale_service(host, noupdate_sec):
        age = datetime.timedelta(seconds=noupdate_sec)
        return objects.Service(host=host,
                               disabled=False,
                               forced_down=False,
                               last_seen_up=timeutils.utcnow() - age,
                               binary=binary)

    return [_stale_service(*spec) for spec in FAKE_SERVICES]
@classmethod
def _fake_compute_node_get_all(cls, context):
    """Stub for ComputeNodeList.get_all: one node per FAKE_COMPUTES host."""
    return [_create_fake_node(*spec) for spec in FAKE_COMPUTES]
@classmethod
def _fake_compute_node_n_to_one_get_all(cls, context):
    """Stub for ComputeNodeList.get_all: two nodes per host (N-to-one)."""
    return [_create_fake_node(*spec) for spec in FAKE_COMPUTES_N_TO_ONE]
def _fake_cell_get_all(context):
return []
def _fake_instance_type_all(*args):
    """Stub for FlavorList.get_all built from the FAKE_ITYPES tuples."""
    return [
        objects.Flavor(memory_mb=mem, root_gb=root, ephemeral_gb=eph)
        for mem, root, eph in FAKE_ITYPES
    ]
class TestCellsStateManager(test.NoDBTestCase):
    """Capacity calculations for CellStateManager, one compute node per host."""

    def setUp(self):
        super(TestCellsStateManager, self).setUp()
        # Replace all DB/object-layer accesses with the module-level fakes.
        self.stub_out('nova.objects.ComputeNodeList.get_all',
                      _fake_compute_node_get_all)
        self.stub_out('nova.objects.ServiceList.get_by_binary',
                      _fake_service_get_all_by_binary)
        self.stub_out('nova.objects.FlavorList.get_all',
                      _fake_instance_type_all)
        self.stub_out('nova.db.api.cell_get_all', _fake_cell_get_all)

    def test_cells_config_not_found(self):
        """A missing cells_config file surfaces as ConfigFilesNotFoundError."""
        self.flags(cells_config='no_such_file_exists.conf', group='cells')
        e = self.assertRaises(cfg.ConfigFilesNotFoundError,
                              state.CellStateManager)
        self.assertEqual(['no_such_file_exists.conf'], e.config_files)

    @mock.patch.object(cfg.ConfigOpts, 'find_file')
    @mock.patch.object(utils, 'read_cached_file')
    def test_filemanager_returned(self, mock_read_cached_file, mock_find_file):
        """With cells_config set, the file-backed manager is chosen and is
        read-only: create/update/delete all raise CellsUpdateUnsupported.
        """
        mock_find_file.return_value = "/etc/nova/cells.json"
        mock_read_cached_file.return_value = (False, six.StringIO('{}'))
        self.flags(cells_config='cells.json', group='cells')
        manager = state.CellStateManager()
        self.assertIsInstance(manager,
                              state.CellStateManagerFile)
        self.assertRaises(exception.CellsUpdateUnsupported,
                          manager.cell_create, None, None)
        self.assertRaises(exception.CellsUpdateUnsupported,
                          manager.cell_update, None, None, None)
        self.assertRaises(exception.CellsUpdateUnsupported,
                          manager.cell_delete, None, None)

    def test_dbmanager_returned(self):
        """Without a cells_config, the DB-backed manager is chosen."""
        self.assertIsInstance(state.CellStateManager(),
                              state.CellStateManagerDB)

    def test_capacity_no_reserve(self):
        # utilize entire cell
        cap = self._capacity(0.0)

        cell_free_ram = sum(max(0, compute[3]) for compute in FAKE_COMPUTES)
        self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])

        cell_free_disk = 1024 * sum(max(0, compute[4])
                                    for compute in FAKE_COMPUTES)
        self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])

        self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
        self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])

        units = cell_free_ram // 50
        self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])

        sz = 25 * 1024
        units = 5  # 4 on host 3, 1 on host4
        self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])

    def test_capacity_full_reserve(self):
        # reserve the entire cell. (utilize zero percent)
        cap = self._capacity(100.0)

        # Totals are unaffected by the reserve; only the unit counts drop.
        cell_free_ram = sum(max(0, compute[3]) for compute in FAKE_COMPUTES)
        self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])

        cell_free_disk = 1024 * sum(max(0, compute[4])
                                    for compute in FAKE_COMPUTES)
        self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])

        self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
        self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
        self.assertEqual(0, cap['ram_free']['units_by_mb']['50'])

        sz = 25 * 1024
        self.assertEqual(0, cap['disk_free']['units_by_mb'][str(sz)])

    def test_capacity_part_reserve(self):
        # utilize half the cell's free capacity
        cap = self._capacity(50.0)

        cell_free_ram = sum(max(0, compute[3]) for compute in FAKE_COMPUTES)
        self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])

        cell_free_disk = 1024 * sum(max(0, compute[4])
                                    for compute in FAKE_COMPUTES)
        self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])

        self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
        self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])

        units = 10  # 10 from host 3
        self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])

        sz = 25 * 1024
        units = 2  # 2 on host 3
        self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])

    def _get_state_manager(self, reserve_percent=0.0):
        # Helper: build a CellStateManager with the given reserve percent.
        self.flags(reserve_percent=reserve_percent, group='cells')
        return state.CellStateManager()

    def _capacity(self, reserve_percent):
        # Helper: return this cell's computed capacities dict.
        state_manager = self._get_state_manager(reserve_percent)
        my_state = state_manager.get_my_state()
        return my_state.capacities
class TestCellsStateManagerNToOne(TestCellsStateManager):
    """Same capacity tests, but with multiple compute nodes per host."""

    def setUp(self):
        super(TestCellsStateManagerNToOne, self).setUp()
        # Override the compute-node stub with the two-nodes-per-host data.
        self.stub_out('nova.objects.ComputeNodeList.get_all',
                      _fake_compute_node_n_to_one_get_all)

    def test_capacity_part_reserve(self):
        # utilize half the cell's free capacity
        cap = self._capacity(50.0)

        cell_free_ram = sum(max(0, compute[3])
                            for compute in FAKE_COMPUTES_N_TO_ONE)
        self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])

        cell_free_disk = (1024 *
                          sum(max(0, compute[4])
                              for compute in FAKE_COMPUTES_N_TO_ONE))
        self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])

        self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
        self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])

        units = 6  # 6 from host 2
        self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])

        sz = 25 * 1024
        units = 1  # 1 on host 2
        self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
class TestCellsStateManagerNodeDown(test.NoDBTestCase):
    """Capacity calculations when a compute service has stopped checking in."""

    def setUp(self):
        super(TestCellsStateManagerNodeDown, self).setUp()
        self.stub_out('nova.objects.ComputeNodeList.get_all',
                      _fake_compute_node_get_all)
        # The service stub reports host4 as stale (see FAKE_SERVICES).
        self.stub_out('nova.objects.ServiceList.get_by_binary',
                      _fake_service_get_all_by_binary_nodedown)
        self.stub_out('nova.objects.FlavorList.get_all',
                      _fake_instance_type_all)
        self.stub_out('nova.db.api.cell_get_all', _fake_cell_get_all)

    def test_capacity_no_reserve_nodedown(self):
        # host4 (the last FAKE_COMPUTES entry) is down, so its capacity
        # must be excluded from the cell totals.
        cap = self._capacity(0.0)

        cell_free_ram = sum(max(0, compute[3])
                            for compute in FAKE_COMPUTES[:-1])
        self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])

        free_disk = sum(max(0, compute[4])
                        for compute in FAKE_COMPUTES[:-1])
        cell_free_disk = 1024 * free_disk
        self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])

    def _get_state_manager(self, reserve_percent=0.0):
        # Helper: build a CellStateManager with the given reserve percent.
        self.flags(reserve_percent=reserve_percent, group='cells')
        return state.CellStateManager()

    def _capacity(self, reserve_percent):
        # Helper: return this cell's computed capacities dict.
        state_manager = self._get_state_manager(reserve_percent)
        my_state = state_manager.get_my_state()
        return my_state.capacities
class TestCellStateManagerException(test.NoDBTestCase):
    """Error handling during CellStateManagerDB initialization."""

    @mock.patch.object(time, 'sleep')
    def test_init_db_error(self, mock_sleep):
        """A DBError during the initial sync is retried after a 30s sleep."""
        class TestCellStateManagerDB(state.CellStateManagerDB):
            def __init__(self):
                # First sync raises, second succeeds.
                self._cell_data_sync = mock.Mock()
                self._cell_data_sync.side_effect = [db_exc.DBError(), []]
                super(TestCellStateManagerDB, self).__init__()
        # NOTE(review): this local deliberately shadows the imported
        # `test` module for the remainder of the method.
        test = TestCellStateManagerDB()
        mock_sleep.assert_called_once_with(30)
        self.assertEqual(2, test._cell_data_sync.call_count)
class TestCellsGetCapacity(TestCellsStateManager):
    """get_capacities() for this cell, child cells and unknown cells."""

    def setUp(self):
        super(TestCellsGetCapacity, self).setUp()
        # Give this cell and two children identical capacities so the
        # aggregate is simply 3x the single-cell value.
        self.capacities = {"ram_free": 1234}
        self.state_manager = self._get_state_manager()
        cell = models.Cell(name="cell_name")
        other_cell = models.Cell(name="other_cell_name")
        cell.capacities = self.capacities
        other_cell.capacities = self.capacities
        self.state_manager.child_cells = {"cell_name": cell,
                                          "other_cell_name": other_cell}
        self.state_manager.my_cell_state.capacities = self.capacities

    def test_get_cell_capacity_for_all_cells(self):
        # 3 cells x 1234 = 3702: capacities are summed across the tree.
        capacities = self.state_manager.get_capacities()
        self.assertEqual({"ram_free": 3702}, capacities)

    def test_get_cell_capacity_for_the_parent_cell(self):
        # Naming the top cell explicitly still aggregates the children.
        capacities = self.state_manager.\
            get_capacities(self.state_manager.my_cell_state.name)
        self.assertEqual({"ram_free": 3702}, capacities)

    def test_get_cell_capacity_for_a_cell(self):
        self.assertEqual(self.capacities,
                         self.state_manager.get_capacities(cell_name="cell_name"))

    def test_get_cell_capacity_for_non_existing_cell(self):
        self.assertRaises(exception.CellNotFound,
                          self.state_manager.get_capacities,
                          cell_name="invalid_cell_name")
class FakeCellStateManager(object):
    """Minimal CellStateManager stand-in that records sync invocations."""

    def __init__(self):
        # Each entry is a (method_name, force_flag) tuple, in call order.
        self.called = []

    def _cell_data_sync(self, force=False):
        record = ('_cell_data_sync', force)
        self.called.append(record)
class TestSyncDecorators(test.NoDBTestCase):
    """sync_before/sync_after must trigger a data sync around the call."""

    def test_sync_before(self):
        manager = FakeCellStateManager()

        def test(inst, *args, **kwargs):
            # The wrapped function must see the original args untouched.
            self.assertEqual(manager, inst)
            self.assertEqual((1, 2, 3), args)
            self.assertEqual(dict(a=4, b=5, c=6), kwargs)
            return 'result'
        wrapper = state.sync_before(test)
        result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6)
        self.assertEqual('result', result)
        # sync_before syncs with force=False before calling through.
        self.assertEqual([('_cell_data_sync', False)], manager.called)

    def test_sync_after(self):
        manager = FakeCellStateManager()

        def test(inst, *args, **kwargs):
            # The wrapped function must see the original args untouched.
            self.assertEqual(manager, inst)
            self.assertEqual((1, 2, 3), args)
            self.assertEqual(dict(a=4, b=5, c=6), kwargs)
            return 'result'
        wrapper = state.sync_after(test)
        result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6)
        self.assertEqual('result', result)
        # sync_after forces a sync (force=True) after the call returns.
        self.assertEqual([('_cell_data_sync', True)], manager.called)
-233
View File
@@ -1,233 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells Utility methods
"""
import inspect
import random
import mock
from nova.cells import utils as cells_utils
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
class CellsUtilsTestCase(test.NoDBTestCase):
    """Test case for Cells utility methods."""

    def test_get_instances_to_sync(self):
        """get_instances_to_sync filter/shuffle plumbing, single page."""
        fake_context = 'fake_context'

        call_info = {'get_all': 0, 'shuffle': 0}

        def random_shuffle(_list):
            call_info['shuffle'] += 1

        @staticmethod
        def instance_get_all_by_filters(context, filters,
                sort_key, sort_dir, limit, marker):
            # Pretend we return a full list the first time otherwise we loop
            # infinitely
            if marker is not None:
                return []
            self.assertEqual(fake_context, context)
            self.assertEqual('deleted', sort_key)
            self.assertEqual('asc', sort_dir)
            call_info['got_filters'] = filters
            call_info['get_all'] += 1
            instances = [fake_instance.fake_db_instance() for i in range(3)]
            return instances

        self.stub_out('nova.objects.InstanceList.get_by_filters',
                      instance_get_all_by_filters)
        self.stub_out('random.shuffle', random_shuffle)

        # No filters, no shuffle.
        instances = cells_utils.get_instances_to_sync(fake_context)
        self.assertTrue(inspect.isgenerator(instances))
        self.assertEqual(3, len([x for x in instances]))
        self.assertEqual(1, call_info['get_all'])
        self.assertEqual({}, call_info['got_filters'])
        self.assertEqual(0, call_info['shuffle'])

        # shuffle=True must invoke random.shuffle once.
        instances = cells_utils.get_instances_to_sync(fake_context,
                                                      shuffle=True)
        self.assertTrue(inspect.isgenerator(instances))
        self.assertEqual(3, len([x for x in instances]))
        self.assertEqual(2, call_info['get_all'])
        self.assertEqual({}, call_info['got_filters'])
        self.assertEqual(1, call_info['shuffle'])

        # updated_since maps to the 'changes-since' filter.
        instances = cells_utils.get_instances_to_sync(fake_context,
                updated_since='fake-updated-since')
        self.assertTrue(inspect.isgenerator(instances))
        self.assertEqual(3, len([x for x in instances]))
        self.assertEqual(3, call_info['get_all'])
        self.assertEqual({'changes-since': 'fake-updated-since'},
                         call_info['got_filters'])
        self.assertEqual(1, call_info['shuffle'])

        # All options combined.
        instances = cells_utils.get_instances_to_sync(fake_context,
                project_id='fake-project',
                updated_since='fake-updated-since', shuffle=True)
        self.assertTrue(inspect.isgenerator(instances))
        self.assertEqual(3, len([x for x in instances]))
        self.assertEqual(4, call_info['get_all'])
        self.assertEqual({'changes-since': 'fake-updated-since',
                          'project_id': 'fake-project'},
                         call_info['got_filters'])
        self.assertEqual(2, call_info['shuffle'])

    @mock.patch.object(objects.InstanceList, 'get_by_filters')
    @mock.patch.object(random, 'shuffle')
    def _test_get_instances_pagination(self, mock_shuffle,
            mock_get_by_filters, shuffle=False, updated_since=None,
            project_id=None):
        """Common body: two full pages then an empty page must produce
        three paginated get_by_filters calls with advancing markers.
        """
        fake_context = 'fake_context'

        instances0 = objects.instance._make_instance_list(fake_context,
                objects.InstanceList(),
                [fake_instance.fake_db_instance() for i in range(3)],
                expected_attrs=None)
        marker0 = instances0[-1]['uuid']
        instances1 = objects.instance._make_instance_list(fake_context,
                objects.InstanceList(),
                [fake_instance.fake_db_instance() for i in range(3)],
                expected_attrs=None)
        marker1 = instances1[-1]['uuid']

        mock_get_by_filters.side_effect = [instances0, instances1, []]
        instances = cells_utils.get_instances_to_sync(fake_context,
                updated_since, project_id, shuffle=shuffle)
        self.assertEqual(len([x for x in instances]), 6)

        filters = {}
        if updated_since is not None:
            filters['changes-since'] = updated_since
        if project_id is not None:
            filters['project_id'] = project_id
        limit = 100
        expected_calls = [mock.call(fake_context, filters, sort_key='deleted',
                                    sort_dir='asc', limit=limit, marker=None),
                          mock.call(fake_context, filters, sort_key='deleted',
                                    sort_dir='asc', limit=limit,
                                    marker=marker0),
                          mock.call(fake_context, filters, sort_key='deleted',
                                    sort_dir='asc', limit=limit,
                                    marker=marker1)]
        mock_get_by_filters.assert_has_calls(expected_calls)
        self.assertEqual(3, mock_get_by_filters.call_count)

    def test_get_instances_to_sync_limit(self):
        self._test_get_instances_pagination()

    def test_get_instances_to_sync_shuffle(self):
        self._test_get_instances_pagination(shuffle=True)

    def test_get_instances_to_sync_updated_since(self):
        self._test_get_instances_pagination(updated_since='fake-updated-since')

    def test_get_instances_to_sync_multiple_params(self):
        self._test_get_instances_pagination(project_id='fake-project',
                updated_since='fake-updated-since', shuffle=True)

    def test_split_cell_and_item(self):
        """cell_with_item/split_cell_and_item round-trip, with and
        without a cell component.
        """
        path = 'australia', 'queensland', 'gold_coast'
        cell = cells_utils.PATH_CELL_SEP.join(path)
        item = 'host_5'
        together = cells_utils.cell_with_item(cell, item)
        self.assertEqual(cells_utils.CELL_ITEM_SEP.join([cell, item]),
                         together)

        # Test normal usage
        result_cell, result_item = cells_utils.split_cell_and_item(together)
        self.assertEqual(cell, result_cell)
        self.assertEqual(item, result_item)

        # Test with no cell
        cell = None
        together = cells_utils.cell_with_item(cell, item)
        self.assertEqual(item, together)
        result_cell, result_item = cells_utils.split_cell_and_item(together)
        self.assertEqual(cell, result_cell)
        self.assertEqual(item, result_item)

    def test_add_cell_to_compute_node(self):
        """The proxy's id and host get the cell path prepended."""
        fake_compute = objects.ComputeNode(id=1, host='fake')
        cell_path = 'fake_path'

        proxy = cells_utils.add_cell_to_compute_node(fake_compute, cell_path)

        self.assertIsInstance(proxy, cells_utils.ComputeNodeProxy)
        self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id)
        self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'),
                         proxy.host)

    @mock.patch.object(objects.Service, 'obj_load_attr')
    def test_add_cell_to_service_no_compute_node(self, mock_get_by_id):
        fake_service = objects.Service(id=1, host='fake')
        mock_get_by_id.side_effect = exception.ServiceNotFound(service_id=1)
        cell_path = 'fake_path'

        proxy = cells_utils.add_cell_to_service(fake_service, cell_path)

        self.assertIsInstance(proxy, cells_utils.ServiceProxy)
        self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id)
        self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'),
                         proxy.host)
        # The proxy never exposes compute_node, even when lazy-load fails.
        self.assertRaises(AttributeError,
                          getattr, proxy, 'compute_node')

    def test_add_cell_to_service_with_compute_node(self):
        fake_service = objects.Service(id=1, host='fake')
        fake_service.compute_node = objects.ComputeNode(id=1, host='fake')
        cell_path = 'fake_path'

        proxy = cells_utils.add_cell_to_service(fake_service, cell_path)

        self.assertIsInstance(proxy, cells_utils.ServiceProxy)
        self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id)
        self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'),
                         proxy.host)
        # compute_node is deliberately hidden by the proxy.
        self.assertRaises(AttributeError,
                          getattr, proxy, 'compute_node')

    def test_proxy_object_serializer_to_primitive(self):
        """Serializing a proxy adds cell_proxy.* keys to the primitive."""
        obj = objects.ComputeNode(id=1, host='fake')
        obj_proxy = cells_utils.ComputeNodeProxy(obj, 'fake_path')
        serializer = cells_utils.ProxyObjectSerializer()

        primitive = serializer.serialize_entity('ctx', obj_proxy)
        self.assertIsInstance(primitive, dict)
        class_name = primitive.pop('cell_proxy.class_name')
        cell_path = primitive.pop('cell_proxy.cell_path')
        self.assertEqual('ComputeNodeProxy', class_name)
        self.assertEqual('fake_path', cell_path)
        self.assertEqual(obj.obj_to_primitive(), primitive)

    def test_proxy_object_serializer_from_primitive(self):
        obj = objects.ComputeNode(id=1, host='fake')
        serializer = cells_utils.ProxyObjectSerializer()

        # Recreating the primitive by hand to isolate the test for only
        # the deserializing method
        primitive = obj.obj_to_primitive()
        primitive['cell_proxy.class_name'] = 'ComputeNodeProxy'
        primitive['cell_proxy.cell_path'] = 'fake_path'

        result = serializer.deserialize_entity('ctx', primitive)
        self.assertIsInstance(result, cells_utils.ComputeNodeProxy)
        self.assertEqual(obj.obj_to_primitive(),
                         result._obj.obj_to_primitive())
        self.assertEqual('fake_path', result._cell_path)
-216
View File
@@ -1,216 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for testing the cells weight algorithms.
Cells with higher weights should be given priority for new builds.
"""
import datetime
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
from nova.cells import state
from nova.cells import weights
from nova import test
class FakeCellState(state.CellState):
    """CellState with an in-memory ram_free capacity table for testing."""

    def __init__(self, cell_name):
        super(FakeCellState, self).__init__(cell_name)
        self.capacities['ram_free'] = {'total_mb': 0,
                                       'units_by_mb': {}}
        self.db_info = {}

    def _update_ram_free(self, *args):
        """Add (ram_size_mb, units) pairs to this cell's ram_free table."""
        ram_free = self.capacities['ram_free']
        for ram_size, units in args:
            ram_free['total_mb'] += units * ram_size
            ram_free['units_by_mb'][str(ram_size)] = units
def _get_fake_cells():
    """Return four fake cells with differing RAM tables and weight offsets."""
    specs = [
        # (name, ((ram_size_mb, units), ...), weight_offset)
        ('cell1', ((512, 1), (1024, 4), (2048, 3)), -200.0),
        ('cell2', ((512, 2), (1024, 3), (2048, 4)), -200.1),
        ('cell3', ((512, 3), (1024, 2), (2048, 1)), 400.0),
        ('cell4', ((512, 4), (1024, 1), (2048, 2)), 300.0),
    ]
    cells = []
    for name, ram_args, offset in specs:
        cell = FakeCellState(name)
        cell._update_ram_free(*ram_args)
        cell.db_info['weight_offset'] = offset
        cells.append(cell)
    return cells
class CellsWeightsTestCase(test.NoDBTestCase):
    """Makes sure the proper weighers are in the directory."""

    def test_all_weighers(self):
        """all_weighers() discovers at least the two well-known weighers."""
        discovered = weights.all_weighers()
        # Check at least a couple that we expect are there
        self.assertGreaterEqual(len(discovered), 2)
        names = [weigher.__name__ for weigher in discovered]
        self.assertIn('WeightOffsetWeigher', names)
        self.assertIn('RamByInstanceTypeWeigher', names)
class _WeigherTestClass(test.NoDBTestCase):
    """Base class for testing individual weigher plugins."""

    # Subclasses set this to the dotted path of the weigher under test.
    weigher_cls_name = None

    def setUp(self):
        super(_WeigherTestClass, self).setUp()
        self.weight_handler = weights.CellWeightHandler()
        weigher_classes = self.weight_handler.get_matching_classes(
            [self.weigher_cls_name])
        self.weighers = [cls() for cls in weigher_classes]

    def _get_weighed_cells(self, cells, weight_properties):
        """Run the configured weighers over cells and return the results."""
        return self.weight_handler.get_weighed_objects(self.weighers,
                cells, weight_properties)
class RAMByInstanceTypeWeigherTestClass(_WeigherTestClass):
    """RamByInstanceTypeWeigher orders cells by free units of the flavor."""

    weigher_cls_name = ('nova.cells.weights.ram_by_instance_type.'
                        'RamByInstanceTypeWeigher')

    def test_default_spreading(self):
        """Test that cells with more ram available return a higher weight."""
        cells = _get_fake_cells()
        # Simulate building a new 512MB instance.
        instance_type = {'memory_mb': 512}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        # cell4 has the most free 512MB units (4), cell1 the fewest (1).
        expected_cells = [cells[3], cells[2], cells[1], cells[0]]
        self.assertEqual(expected_cells, resulting_cells)

        # Simulate building a new 1024MB instance.
        instance_type = {'memory_mb': 1024}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[0], cells[1], cells[2], cells[3]]
        self.assertEqual(expected_cells, resulting_cells)

        # Simulate building a new 2048MB instance.
        instance_type = {'memory_mb': 2048}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[1], cells[0], cells[3], cells[2]]
        self.assertEqual(expected_cells, resulting_cells)

    def test_negative_multiplier(self):
        """Test that cells with less ram available return a higher weight."""
        self.flags(ram_weight_multiplier=-1.0, group='cells')
        cells = _get_fake_cells()
        # Simulate building a new 512MB instance.
        instance_type = {'memory_mb': 512}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        # Ordering is exactly reversed relative to test_default_spreading.
        expected_cells = [cells[0], cells[1], cells[2], cells[3]]
        self.assertEqual(expected_cells, resulting_cells)

        # Simulate building a new 1024MB instance.
        instance_type = {'memory_mb': 1024}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[3], cells[2], cells[1], cells[0]]
        self.assertEqual(expected_cells, resulting_cells)

        # Simulate building a new 2048MB instance.
        instance_type = {'memory_mb': 2048}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[2], cells[3], cells[0], cells[1]]
        self.assertEqual(expected_cells, resulting_cells)
class WeightOffsetWeigherTestClass(_WeigherTestClass):
    """Test the WeightOffsetWeigher class."""

    weigher_cls_name = 'nova.cells.weights.weight_offset.WeightOffsetWeigher'

    def test_weight_offset(self):
        """Test that cells with higher weight_offsets return higher
        weights.
        """
        cells = _get_fake_cells()
        weighed_cells = self._get_weighed_cells(cells, {})
        self.assertEqual(4, len(weighed_cells))
        # Offsets are 400.0 (cell3), 300.0 (cell4), -200.0, -200.1.
        expected_cells = [cells[2], cells[3], cells[0], cells[1]]
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        self.assertEqual(expected_cells, resulting_cells)
class MuteWeigherTestClass(_WeigherTestClass):
    """MuteChildWeigher penalizes cells unseen within mute_child_interval."""

    weigher_cls_name = 'nova.cells.weights.mute_child.MuteChildWeigher'

    def setUp(self):
        super(MuteWeigherTestClass, self).setUp()
        self.flags(mute_weight_multiplier=-10.0, mute_child_interval=100,
                   group='cells')
        self.now = timeutils.utcnow()
        # Freeze time so cell "last_seen" ages are deterministic.
        self.useFixture(utils_fixture.TimeFixture(self.now))

        self.cells = _get_fake_cells()
        for cell in self.cells:
            cell.last_seen = self.now

    def test_non_mute(self):
        # All cells seen "now": no cell is muted, all weights are zero.
        weight_properties = {}
        weighed_cells = self._get_weighed_cells(self.cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        for weighed_cell in weighed_cells:
            self.assertEqual(0, weighed_cell.weight)

    def test_mutes(self):
        # make 2 of them mute:
        self.cells[0].last_seen = (self.cells[0].last_seen -
                                   datetime.timedelta(seconds=200))
        self.cells[1].last_seen = (self.cells[1].last_seen -
                                   datetime.timedelta(seconds=200))

        weight_properties = {}
        weighed_cells = self._get_weighed_cells(self.cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        # Non-muted cells sort first with weight 0 ...
        for i in range(2):
            weighed_cell = weighed_cells.pop(0)
            self.assertEqual(0, weighed_cell.weight)
            self.assertIn(weighed_cell.obj.name, ['cell3', 'cell4'])
        # ... muted cells get the mute_weight_multiplier (-10.0) penalty.
        for i in range(2):
            weighed_cell = weighed_cells.pop(0)
            self.assertEqual(-10.0, weighed_cell.weight)
            self.assertIn(weighed_cell.obj.name, ['cell1', 'cell2'])
+156 -486
View File
@@ -29,7 +29,6 @@ from oslo_utils import uuidutils
import six
from nova.compute import api as compute_api
from nova.compute import cells_api as compute_cells_api
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import rpcapi as compute_rpcapi
@@ -141,7 +140,6 @@ class _ComputeAPIUnitTestMixIn(object):
instance._context = self.context
instance.id = 1
instance.uuid = uuidutils.generate_uuid()
instance.cell_name = 'api!child'
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.image_ref = FAKE_IMAGE_REF
@@ -646,10 +644,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertIsNone(instance.task_state)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
rpcapi = self.compute_api.compute_rpcapi
with test.nested(
mock.patch.object(instance, 'save'),
@@ -685,10 +680,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
self.assertIsNone(instance.task_state)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
rpcapi = self.compute_api.compute_rpcapi
with test.nested(
mock.patch.object(instance, 'save'),
@@ -709,10 +701,7 @@ class _ComputeAPIUnitTestMixIn(object):
params = dict(vm_state=vm_states.STOPPED)
instance = self._create_instance_obj(params=params)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
rpcapi = self.compute_api.compute_rpcapi
with test.nested(
mock.patch.object(instance, 'save'),
@@ -747,10 +736,7 @@ class _ComputeAPIUnitTestMixIn(object):
params = dict(task_state=None, progress=99, vm_state=vm_state)
instance = self._create_instance_obj(params=params)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
rpcapi = self.compute_api.compute_rpcapi
with test.nested(
mock.patch.object(instance, 'save'),
@@ -831,11 +817,7 @@ class _ComputeAPIUnitTestMixIn(object):
_record_action_start.assert_called_once_with(self.context, instance,
instance_actions.TRIGGER_CRASH_DUMP)
if self.cell_type == 'api':
# cell api has not been implemented.
pass
else:
trigger_crash_dump.assert_called_once_with(self.context, instance)
trigger_crash_dump.assert_called_once_with(self.context, instance)
self.assertIsNone(instance.task_state)
@@ -870,10 +852,7 @@ class _ComputeAPIUnitTestMixIn(object):
if reboot_type == 'HARD':
expected_task_state = task_states.ALLOW_REBOOT
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
rpcapi = self.compute_api.compute_rpcapi
with test.nested(
mock.patch.object(self.context, 'elevated'),
@@ -1068,8 +1047,6 @@ class _ComputeAPIUnitTestMixIn(object):
snapshot_id = self._set_delete_shelved_part(inst,
mock_image_delete)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
mock_terminate = self.useFixture(
fixtures.MockPatchObject(rpcapi, 'terminate_instance')).mock
mock_soft_delete = self.useFixture(
@@ -1080,68 +1057,64 @@ class _ComputeAPIUnitTestMixIn(object):
# NOTE(comstud): This is getting messy. But what we are wanting
# to test is:
# If cells is enabled and we're the API cell:
# * Cast to cells_rpcapi.<method>
# Otherwise:
# * Check for downed host
# * If downed host:
# * Clean up instance, destroying it, sending notifications.
# (Tested in _test_downed_host_part())
# * If not downed host:
# * Record the action start.
# * Cast to compute_rpcapi.<method>
# * Check for downed host
# * If downed host:
# * Clean up instance, destroying it, sending notifications.
# (Tested in _test_downed_host_part())
# * If not downed host:
# * Record the action start.
# * Cast to compute_rpcapi.<method>
cast = True
is_downed_host = inst.host == 'down-host' or inst.host is None
if self.cell_type != 'api':
if inst.vm_state == vm_states.RESIZED:
migration = objects.Migration._from_db_object(
self.context, objects.Migration(),
test_migration.fake_db_migration())
mock_elevated.return_value = self.context
expected_elevated_calls.append(mock.call())
mock_mig_get.return_value = migration
expected_record_calls.append(
mock.call(self.context, inst,
instance_actions.CONFIRM_RESIZE))
if inst.vm_state == vm_states.RESIZED:
migration = objects.Migration._from_db_object(
self.context, objects.Migration(),
test_migration.fake_db_migration())
mock_elevated.return_value = self.context
expected_elevated_calls.append(mock.call())
mock_mig_get.return_value = migration
expected_record_calls.append(
mock.call(self.context, inst,
instance_actions.CONFIRM_RESIZE))
# After confirm resize action, instance task_state
# is reset to None, so is the expected value. But
# for soft delete, task_state will be again reset
# back to soft-deleting in the code to avoid status
# checking failure.
updates['task_state'] = None
if delete_type == 'soft_delete':
expected_save_calls.append(mock.call())
updates['task_state'] = 'soft-deleting'
if inst.host is not None:
mock_elevated.return_value = self.context
expected_elevated_calls.append(mock.call())
mock_get_cn.return_value = objects.Service()
mock_up.return_value = (inst.host != 'down-host')
if is_downed_host:
mock_elevated.return_value = self.context
expected_elevated_calls.append(mock.call())
# After confirm resize action, instance task_state
# is reset to None, so is the expected value. But
# for soft delete, task_state will be again reset
# back to soft-deleting in the code to avoid status
# checking failure.
updates['task_state'] = None
if delete_type == 'soft_delete':
expected_save_calls.append(mock.call())
state = ('soft' in delete_type and vm_states.SOFT_DELETED or
vm_states.DELETED)
updates.update({'vm_state': state,
'task_state': None,
'terminated_at': delete_time,
'deleted_at': delete_time,
'deleted': True})
fake_inst = fake_instance.fake_db_instance(**updates)
mock_inst_destroy.return_value = fake_inst
cell = objects.CellMapping(uuid=uuids.cell,
transport_url='fake://',
database_connection='fake://')
im = objects.InstanceMapping(cell_mapping=cell)
mock_get_inst.return_value = im
cast = False
updates['task_state'] = 'soft-deleting'
if cast and self.cell_type != 'api':
if inst.host is not None:
mock_elevated.return_value = self.context
expected_elevated_calls.append(mock.call())
mock_get_cn.return_value = objects.Service()
mock_up.return_value = (inst.host != 'down-host')
if is_downed_host:
mock_elevated.return_value = self.context
expected_elevated_calls.append(mock.call())
expected_save_calls.append(mock.call())
state = ('soft' in delete_type and vm_states.SOFT_DELETED or
vm_states.DELETED)
updates.update({'vm_state': state,
'task_state': None,
'terminated_at': delete_time,
'deleted_at': delete_time,
'deleted': True})
fake_inst = fake_instance.fake_db_instance(**updates)
mock_inst_destroy.return_value = fake_inst
cell = objects.CellMapping(uuid=uuids.cell,
transport_url='fake://',
database_connection='fake://')
im = objects.InstanceMapping(cell_mapping=cell)
mock_get_inst.return_value = im
cast = False
if cast:
expected_record_calls.append(mock.call(self.context, inst,
instance_actions.DELETE))
@@ -1162,39 +1135,38 @@ class _ComputeAPIUnitTestMixIn(object):
if expected_elevated_calls:
mock_elevated.assert_has_calls(expected_elevated_calls)
if self.cell_type != 'api':
if inst.vm_state == vm_states.RESIZED:
mock_mig_get.assert_called_once_with(
self.context, instance_uuid, 'finished')
mock_confirm.assert_called_once_with(
self.context, inst, migration, migration['source_compute'],
cast=False)
if instance_host is not None:
mock_get_cn.assert_called_once_with(self.context,
instance_host)
mock_up.assert_called_once_with(
test.MatchType(objects.Service))
if is_downed_host:
if 'soft' in delete_type:
mock_notify_legacy.assert_has_calls([
mock.call(self.compute_api.notifier, self.context,
inst, 'delete.start'),
mock.call(self.compute_api.notifier, self.context,
inst, 'delete.end')])
else:
mock_notify_legacy.assert_has_calls([
mock.call(self.compute_api.notifier, self.context,
inst, '%s.start' % delete_type),
mock.call(self.compute_api.notifier, self.context,
inst, '%s.end' % delete_type)])
mock_deallocate.assert_called_once_with(self.context, inst)
mock_inst_destroy.assert_called_once_with(
self.context, instance_uuid, constraint=None,
hard_delete=False)
mock_get_inst.assert_called_with(self.context, instance_uuid)
self.assertEqual(2, mock_get_inst.call_count)
self.assertTrue(mock_get_inst.return_value.queued_for_delete)
mock_save_im.assert_called_once_with()
if inst.vm_state == vm_states.RESIZED:
mock_mig_get.assert_called_once_with(
self.context, instance_uuid, 'finished')
mock_confirm.assert_called_once_with(
self.context, inst, migration, migration['source_compute'],
cast=False)
if instance_host is not None:
mock_get_cn.assert_called_once_with(self.context,
instance_host)
mock_up.assert_called_once_with(
test.MatchType(objects.Service))
if is_downed_host:
if 'soft' in delete_type:
mock_notify_legacy.assert_has_calls([
mock.call(self.compute_api.notifier, self.context,
inst, 'delete.start'),
mock.call(self.compute_api.notifier, self.context,
inst, 'delete.end')])
else:
mock_notify_legacy.assert_has_calls([
mock.call(self.compute_api.notifier, self.context,
inst, '%s.start' % delete_type),
mock.call(self.compute_api.notifier, self.context,
inst, '%s.end' % delete_type)])
mock_deallocate.assert_called_once_with(self.context, inst)
mock_inst_destroy.assert_called_once_with(
self.context, instance_uuid, constraint=None,
hard_delete=False)
mock_get_inst.assert_called_with(self.context, instance_uuid)
self.assertEqual(2, mock_get_inst.call_count)
self.assertTrue(mock_get_inst.return_value.queued_for_delete)
mock_save_im.assert_called_once_with()
if cast:
if delete_type == 'soft_delete':
@@ -1313,16 +1285,10 @@ class _ComputeAPIUnitTestMixIn(object):
with mock.patch.object(self.compute_api.compute_rpcapi,
'terminate_instance') as mock_terminate:
self.compute_api.delete(self.context, inst)
if self.cell_type == 'api':
mock_terminate.assert_called_once_with(
self.context, inst, mock_bdm_get.return_value,
delete_type='delete')
mock_local_delete.assert_not_called()
else:
mock_local_delete.assert_called_once_with(
self.context, inst, mock_bdm_get.return_value,
'delete', self.compute_api._do_delete)
mock_terminate.assert_not_called()
mock_local_delete.assert_called_once_with(
self.context, inst, mock_bdm_get.return_value,
'delete', self.compute_api._do_delete)
mock_terminate.assert_not_called()
mock_service_get.assert_not_called()
@mock.patch('nova.compute.api.API._delete_while_booting',
@@ -1348,21 +1314,14 @@ class _ComputeAPIUnitTestMixIn(object):
with mock.patch.object(self.compute_api.compute_rpcapi,
'terminate_instance') as mock_terminate:
self.compute_api.delete(self.context, inst)
if self.cell_type == 'api':
mock_terminate.assert_called_once_with(
self.context, inst, mock_bdm_get.return_value,
delete_type='delete')
mock_local_delete.assert_not_called()
mock_service_get.assert_not_called()
else:
mock_service_get.assert_called_once_with(
mock_elevated.return_value, 'fake-host')
mock_service_up.assert_called_once_with(
mock_service_get.return_value)
mock_terminate.assert_called_once_with(
self.context, inst, mock_bdm_get.return_value,
delete_type='delete')
mock_local_delete.assert_not_called()
mock_service_get.assert_called_once_with(
mock_elevated.return_value, 'fake-host')
mock_service_up.assert_called_once_with(
mock_service_get.return_value)
mock_terminate.assert_called_once_with(
self.context, inst, mock_bdm_get.return_value,
delete_type='delete')
mock_local_delete.assert_not_called()
def test_delete_forced_when_task_state_is_not_none(self):
for vm_state in self._get_vm_states():
@@ -1389,27 +1348,18 @@ class _ComputeAPIUnitTestMixIn(object):
fixtures.MockPatchObject(self.compute_api,
'_lookup_instance')).mock
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
mock_terminate = self.useFixture(
fixtures.MockPatchObject(rpcapi,
'terminate_instance')).mock
mock_lookup.return_value = (None, inst)
mock_bdm_get.return_value = objects.BlockDeviceMappingList()
mock_br_get.side_effect = exception.BuildRequestNotFound(
uuid=inst.uuid)
if self.cell_type != 'api':
mock_cons.return_value = 'constraint'
delete_time = datetime.datetime(1955, 11, 5, 9, 30,
tzinfo=iso8601.UTC)
updates['deleted_at'] = delete_time
updates['deleted'] = True
fake_inst = fake_instance.fake_db_instance(**updates)
mock_inst_destroy.return_value = fake_inst
mock_cons.return_value = 'constraint'
delete_time = datetime.datetime(1955, 11, 5, 9, 30,
tzinfo=iso8601.UTC)
updates['deleted_at'] = delete_time
updates['deleted'] = True
fake_inst = fake_instance.fake_db_instance(**updates)
mock_inst_destroy.return_value = fake_inst
instance_uuid = inst.uuid
self.compute_api.delete(self.context, inst)
@@ -1422,27 +1372,21 @@ class _ComputeAPIUnitTestMixIn(object):
mock_br_get.assert_called_once_with(self.context, instance_uuid)
mock_save.assert_called_once_with()
if self.cell_type == 'api':
mock_terminate.assert_called_once_with(
self.context, inst,
test.MatchType(objects.BlockDeviceMappingList),
delete_type='delete')
else:
mock_notify_legacy.assert_has_calls([
mock.call(self.compute_api.notifier, self.context,
inst, 'delete.start'),
mock.call(self.compute_api.notifier, self.context,
inst, 'delete.end')])
mock_notify.assert_has_calls([
mock.call(self.context, inst, host='fake-mini',
source='nova-api', action='delete', phase='start'),
mock.call(self.context, inst, host='fake-mini',
source='nova-api', action='delete', phase='end')])
mock_notify_legacy.assert_has_calls([
mock.call(self.compute_api.notifier, self.context,
inst, 'delete.start'),
mock.call(self.compute_api.notifier, self.context,
inst, 'delete.end')])
mock_notify.assert_has_calls([
mock.call(self.context, inst, host='fake-mini',
source='nova-api', action='delete', phase='start'),
mock.call(self.context, inst, host='fake-mini',
source='nova-api', action='delete', phase='end')])
mock_cons.assert_called_once_with(host=mock.ANY)
mock_inst_destroy.assert_called_once_with(
self.context, instance_uuid, constraint='constraint',
hard_delete=False)
mock_cons.assert_called_once_with(host=mock.ANY)
mock_inst_destroy.assert_called_once_with(
self.context, instance_uuid, constraint='constraint',
hard_delete=False)
def _fake_do_delete(context, instance, bdms,
rservations=None, local=False):
@@ -1489,8 +1433,7 @@ class _ComputeAPIUnitTestMixIn(object):
mock_bdm_destroy.assert_called_once_with()
mock_inst_destroy.assert_called_once_with()
if self.cell_type != 'api':
mock_dealloc.assert_called_once_with(self.context, inst)
mock_dealloc.assert_called_once_with(self.context, inst)
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
def test_local_cleanup_bdm_volumes_stashed_connector(self, mock_destroy):
@@ -1784,16 +1727,12 @@ class _ComputeAPIUnitTestMixIn(object):
def test(mock_inst_get, mock_map_get):
cell, ret_instance = self.compute_api._lookup_instance(
self.context, instance.uuid)
expected_cell = (self.cell_type is None and
inst_map.cell_mapping or None)
expected_cell = inst_map.cell_mapping
self.assertEqual((expected_cell, instance),
(cell, ret_instance))
mock_inst_get.assert_called_once_with(self.context, instance.uuid)
if self.cell_type is None:
mock_target_cell.assert_called_once_with(self.context,
inst_map.cell_mapping)
else:
self.assertFalse(mock_target_cell.called)
mock_target_cell.assert_called_once_with(self.context,
inst_map.cell_mapping)
test()
@@ -1987,8 +1926,7 @@ class _ComputeAPIUnitTestMixIn(object):
else:
new_flavor = current_flavor
if (self.cell_type == 'compute' or
not (flavor_id_passed and same_flavor)):
if not (flavor_id_passed and same_flavor):
project_id, user_id = quotas_obj.ids_from_instance(self.context,
fake_inst)
if flavor_id_passed:
@@ -2014,27 +1952,6 @@ class _ComputeAPIUnitTestMixIn(object):
else:
filter_properties = {'ignore_hosts': [fake_inst['host']]}
if self.cell_type == 'api':
mig = mock.MagicMock()
mock_migration.return_value = mig
def _check_mig():
self.assertEqual(fake_inst.uuid, mig.instance_uuid)
self.assertEqual(current_flavor.id,
mig.old_instance_type_id)
self.assertEqual(new_flavor.id,
mig.new_instance_type_id)
self.assertEqual('finished', mig.status)
if new_flavor.id != current_flavor.id:
self.assertEqual('resize', mig.migration_type)
else:
self.assertEqual('migration', mig.migration_type)
mock_elevated = self.useFixture(
fixtures.MockPatchObject(self.context, 'elevated')).mock
mock_elevated.return_value = self.context
mig.create.side_effect = _check_mig
if request_spec:
fake_spec = objects.RequestSpec()
if requested_destination:
@@ -2092,8 +2009,7 @@ class _ComputeAPIUnitTestMixIn(object):
mock_get_flavor.assert_called_once_with('new-flavor-id',
read_deleted='no')
if (self.cell_type == 'compute' or
not (flavor_id_passed and same_flavor)):
if not (flavor_id_passed and same_flavor):
if flavor_id_passed:
mock_upsize.assert_called_once_with(
test.MatchType(objects.Flavor),
@@ -2122,12 +2038,7 @@ class _ComputeAPIUnitTestMixIn(object):
# This is a migration
mock_validate.assert_not_called()
if self.cell_type == 'api' and request_spec:
mock_migration.assert_called_once_with(context=self.context)
mock_elevated.assert_called_once_with()
mig.create.assert_called_once_with()
else:
mock_migration.assert_not_called()
mock_migration.assert_not_called()
mock_get_by_instance_uuid.assert_called_once_with(self.context,
fake_inst.uuid)
@@ -2312,13 +2223,11 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.compute.api.API._validate_flavor_image_nostatus')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch('nova.compute.api.API._record_action_start')
@mock.patch('nova.compute.api.API._resize_cells_support')
@mock.patch('nova.conductor.conductor_api.ComputeTaskAPI.resize_instance')
@mock.patch.object(flavors, 'get_flavor_by_flavor_id')
def test_resize_to_zero_disk_flavor_volume_backed(self,
get_flavor_by_flavor_id,
resize_instance_mock,
cells_support_mock,
record_mock,
get_by_inst,
validate_mock):
@@ -2460,10 +2369,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertIsNone(instance.task_state)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
rpcapi = self.compute_api.compute_rpcapi
mock_pause = self.useFixture(
fixtures.MockPatchObject(rpcapi, 'pause_instance')).mock
@@ -2501,10 +2407,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertEqual(instance.vm_state, vm_states.PAUSED)
self.assertIsNone(instance.task_state)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.compute_rpcapi
rpcapi = self.compute_api.compute_rpcapi
with mock.patch.object(rpcapi, 'unpause_instance') as mock_unpause:
self.compute_api.unpause(self.context, instance)
@@ -2550,10 +2453,7 @@ class _ComputeAPIUnitTestMixIn(object):
add_instance_fault_from_exc,
mock_nodelist):
instance = self._create_instance_obj()
if self.cell_type == 'api':
api = self.compute_api.cells_rpcapi
else:
api = conductor.api.ComputeTaskAPI
api = conductor.api.ComputeTaskAPI
with mock.patch.object(api, 'live_migrate_instance',
side_effect=oslo_exceptions.MessagingTimeout):
@@ -2588,13 +2488,7 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceAction, 'action_start')
def _live_migrate_instance(self, instance, _save, _action, get_spec):
# TODO(gilliard): This logic is upside-down (different
# behaviour depending on which class this method is mixed-into. Once
# we have cellsv2 we can remove this kind of logic from this test
if self.cell_type == 'api':
api = self.compute_api.cells_rpcapi
else:
api = conductor.api.ComputeTaskAPI
api = conductor.api.ComputeTaskAPI
fake_spec = objects.RequestSpec()
get_spec.return_value = fake_spec
with mock.patch.object(api, 'live_migrate_instance') as task:
@@ -5486,9 +5380,6 @@ class _ComputeAPIUnitTestMixIn(object):
def test_live_migrate_force_complete_succeeded(
self, action_start, get_by_id_and_instance):
if self.cell_type == 'api':
# cell api has not been implemented.
return
rpcapi = self.compute_api.compute_rpcapi
instance = self._create_instance_obj()
@@ -5917,21 +5808,10 @@ class _ComputeAPIUnitTestMixIn(object):
mock_get_inst_map):
self.useFixture(nova_fixtures.AllServicesCurrent())
if self.cell_type is None:
# No Mapping means NotFound
self.assertRaises(exception.InstanceNotFound,
self.compute_api.get, self.context,
uuids.inst_uuid)
else:
self.compute_api.get(self.context, uuids.inst_uuid)
mock_get_build_req.assert_not_called()
mock_get_inst.assert_called_once_with(self.context,
uuids.inst_uuid,
expected_attrs=[
'metadata',
'system_metadata',
'security_groups',
'info_cache'])
# No Mapping means NotFound
self.assertRaises(exception.InstanceNotFound,
self.compute_api.get, self.context,
uuids.inst_uuid)
@mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
@mock.patch.object(objects.BuildRequest, 'get_by_instance_uuid')
@@ -5946,16 +5826,10 @@ class _ComputeAPIUnitTestMixIn(object):
mock_get_inst.return_value = instance
inst_from_build_req = self.compute_api.get(self.context, instance.uuid)
if self.cell_type is None:
mock_get_inst_map.assert_called_once_with(self.context,
instance.uuid)
mock_get_build_req.assert_called_once_with(self.context,
instance.uuid)
else:
mock_get_inst.assert_called_once_with(
self.context, instance.uuid,
expected_attrs=['metadata', 'system_metadata',
'security_groups', 'info_cache'])
mock_get_inst_map.assert_called_once_with(self.context,
instance.uuid)
mock_get_build_req.assert_called_once_with(self.context,
instance.uuid)
self.assertEqual(instance, inst_from_build_req)
@mock.patch('nova.compute.api.API._save_user_id_in_instance_mapping',
@@ -5987,11 +5861,10 @@ class _ComputeAPIUnitTestMixIn(object):
inst_map_calls = [mock.call(self.context, instance.uuid),
mock.call(self.context, instance.uuid)]
if self.cell_type is None:
mock_get_inst_map.assert_has_calls(inst_map_calls)
self.assertEqual(2, mock_get_inst_map.call_count)
mock_get_build_req.assert_called_once_with(self.context,
instance.uuid)
mock_get_inst_map.assert_has_calls(inst_map_calls)
self.assertEqual(2, mock_get_inst_map.call_count)
mock_get_build_req.assert_called_once_with(self.context,
instance.uuid)
mock_get_inst.assert_called_once_with(self.context, instance.uuid,
expected_attrs=[
@@ -6029,21 +5902,9 @@ class _ComputeAPIUnitTestMixIn(object):
uuid=instance.uuid)
mock_get_inst.return_value = instance
if self.cell_type is None:
self.assertRaises(exception.InstanceNotFound,
self.compute_api.get,
self.context, instance.uuid)
else:
inst_from_get = self.compute_api.get(self.context, instance.uuid)
mock_get_inst.assert_called_once_with(self.context,
instance.uuid,
expected_attrs=[
'metadata',
'system_metadata',
'security_groups',
'info_cache'])
self.assertEqual(instance, inst_from_get)
self.assertRaises(exception.InstanceNotFound,
self.compute_api.get,
self.context, instance.uuid)
@mock.patch('nova.objects.InstanceMapping.save')
def test_save_user_id_in_instance_mapping(self, im_save):
@@ -6087,13 +5948,10 @@ class _ComputeAPIUnitTestMixIn(object):
returned_inst = self.compute_api.get(self.context, instance.uuid)
mock_get_build_req.assert_not_called()
if self.cell_type is None:
mock_get_inst_map.assert_called_once_with(self.context,
instance.uuid)
# Verify that user_id is populated during a compute_api.get().
mock_save_uid.assert_called_once_with(inst_map, instance)
else:
self.assertFalse(mock_get_inst_map.called)
mock_get_inst_map.assert_called_once_with(self.context,
instance.uuid)
# Verify that user_id is populated during a compute_api.get().
mock_save_uid.assert_called_once_with(inst_map, instance)
self.assertEqual(instance, returned_inst)
mock_get_inst.assert_called_once_with(self.context, instance.uuid,
expected_attrs=[
@@ -6242,9 +6100,9 @@ class _ComputeAPIUnitTestMixIn(object):
limit=10, marker='fake-marker', sort_keys=['baz'],
sort_dirs=['desc'])
if self.cell_type is None:
for cm in mock_cm_get_all.return_value:
mock_target_cell.assert_any_call(self.context, cm)
for cm in mock_cm_get_all.return_value:
mock_target_cell.assert_any_call(self.context, cm)
fields = ['metadata', 'info_cache', 'security_groups']
mock_inst_get.assert_called_once_with(
mock.ANY, {'foo': 'bar'},
@@ -6467,7 +6325,6 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
def setUp(self):
super(ComputeAPIUnitTestCase, self).setUp()
self.compute_api = compute_api.API()
self.cell_type = None
def test_resize_same_flavor_fails(self):
self.assertRaises(exception.CannotResizeToSameFlavor,
@@ -6832,193 +6689,6 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
mock_report_client.assert_called_once_with()
class Cellsv1DeprecatedTestMixIn(object):
def test_get_all_build_requests_decrement_limit(self):
self.skipTest("Removing cells v1")
def test_get_all_cell0_marker_not_found(self):
self.skipTest("Removing cells v1")
def test_get_all_includes_build_request_cell0(self):
self.skipTest("Removing cells v1")
def test_get_all_includes_build_requests(self):
self.skipTest("Removing cells v1")
def test_get_all_includes_build_requests_filter_dupes(self):
self.skipTest("Removing cells v1")
def test_tenant_to_project_conversion(self):
self.skipTest("Removing cells v1")
def test_get_all_with_cell_down_support(self):
self.skipTest("Cell down handling is not supported for cells_v1.")
def test_get_all_without_cell_down_support(self):
self.skipTest("Cell down handling is not supported for cells_v1.")
def test_get_all_with_cell_down_support_all_tenants(self):
self.skipTest("Cell down handling is not supported for cells_v1.")
class ComputeAPIAPICellUnitTestCase(Cellsv1DeprecatedTestMixIn,
_ComputeAPIUnitTestMixIn,
test.NoDBTestCase):
def setUp(self):
super(ComputeAPIAPICellUnitTestCase, self).setUp()
self.flags(cell_type='api', enable=True, group='cells')
self.compute_api = compute_cells_api.ComputeCellsAPI()
self.cell_type = 'api'
def test_resize_same_flavor_fails(self):
self.assertRaises(exception.CannotResizeToSameFlavor,
self._test_resize, same_flavor=True)
@mock.patch.object(compute_cells_api, 'ComputeRPCAPIRedirect')
def test_create_volume_bdm_call_reserve_dev_name(self, mock_reserve):
instance = self._create_instance_obj()
# In the cells rpcapi there isn't the call for the
# reserve_block_device_name so the volume_bdm returned
# by the _create_volume_bdm is None
volume = {'id': '1', 'multiattach': False}
result = self.compute_api._create_volume_bdm(self.context,
instance,
'vda',
volume,
None,
None)
self.assertIsNone(result, None)
@mock.patch.object(compute_cells_api.ComputeCellsAPI, '_call_to_cells')
@mock.patch.object(objects.Service, 'get_minimum_version',
return_value=COMPUTE_VERSION_OLD_ATTACH_FLOW)
def test_attach_volume(self, mock_get_min_ver, mock_attach):
instance = self._create_instance_obj()
volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
None, None, None, None, None)
mock_volume_api = mock.patch.object(self.compute_api, 'volume_api',
mock.MagicMock(spec=cinder.API))
with mock_volume_api as mock_v_api:
mock_v_api.get.return_value = volume
self.compute_api.attach_volume(
self.context, instance, volume['id'])
mock_v_api.check_availability_zone.assert_called_once_with(
self.context, volume, instance=instance)
mock_attach.assert_called_once_with(self.context, instance,
'attach_volume', volume['id'],
None, None, None)
@mock.patch.object(compute_cells_api.ComputeCellsAPI, '_call_to_cells')
@mock.patch.object(objects.Service, 'get_minimum_version',
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW)
@mock.patch.object(cinder, 'is_microversion_supported')
@mock.patch.object(objects.BlockDeviceMapping,
'get_by_volume_and_instance')
def test_attach_volume_new_flow(self, mock_no_bdm,
mock_cinder_mv_supported,
mock_get_min_ver, mock_attach):
mock_no_bdm.side_effect = exception.VolumeBDMNotFound(
volume_id='test-vol')
instance = self._create_instance_obj()
volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
None, None, None, None, None)
mock_volume_api = mock.patch.object(self.compute_api, 'volume_api',
mock.MagicMock(spec=cinder.API))
with mock_volume_api as mock_v_api:
mock_v_api.get.return_value = volume
self.compute_api.attach_volume(
self.context, instance, volume['id'])
mock_v_api.check_availability_zone.assert_called_once_with(
self.context, volume, instance=instance)
mock_attach.assert_called_once_with(self.context, instance,
'attach_volume', volume['id'],
None, None, None)
@mock.patch.object(objects.Service, 'get_minimum_version',
return_value=COMPUTE_VERSION_OLD_ATTACH_FLOW)
@mock.patch('nova.volume.cinder.API.get')
def test_tagged_volume_attach(self, mock_vol_get, mock_get_min_ver):
instance = self._create_instance_obj()
volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
None, None, None, None, None)
mock_vol_get.return_value = volume
self.assertRaises(exception.VolumeTaggedAttachNotSupported,
self.compute_api.attach_volume, self.context,
instance, volume['id'], tag='foo')
@mock.patch.object(objects.Service, 'get_minimum_version',
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW)
@mock.patch.object(cinder, 'is_microversion_supported')
@mock.patch.object(objects.BlockDeviceMapping,
'get_by_volume_and_instance')
@mock.patch('nova.volume.cinder.API.get')
def test_tagged_volume_attach_new_flow(self, mock_get_vol, mock_no_bdm,
mock_cinder_mv_supported,
mock_get_min_ver):
mock_no_bdm.side_effect = exception.VolumeBDMNotFound(
volume_id='test-vol')
instance = self._create_instance_obj()
volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
None, None, None, None, None)
mock_get_vol.return_value = volume
self.assertRaises(exception.VolumeTaggedAttachNotSupported,
self.compute_api.attach_volume, self.context,
instance, volume['id'], tag='foo')
def test_create_with_networks_max_count_none(self):
self.skipTest("This test does not test any rpcapi.")
def test_attach_volume_reserve_fails(self):
self.skipTest("Reserve is never done in the API cell.")
def test_attach_volume_attachment_create_fails(self):
self.skipTest("Reserve is never done in the API cell.")
def test_check_requested_networks_no_requested_networks(self):
# The API cell just returns the number of instances passed in since the
# actual validation happens in the child (compute) cell.
self.assertEqual(
2, self.compute_api._check_requested_networks(
self.context, None, 2))
def test_check_requested_networks_auto_allocate(self):
# The API cell just returns the number of instances passed in since the
# actual validation happens in the child (compute) cell.
requested_networks = (
objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='auto')]))
count = self.compute_api._check_requested_networks(
self.context, requested_networks, 5)
self.assertEqual(5, count)
def test_attach_volume_with_multiattach_volume_fails(self):
"""Tests that the cells v1 API doesn't support attaching multiattach
volumes.
"""
instance = objects.Instance(cell_name='foo')
volume = {'multiattach': True}
device = disk_bus = disk_type = None
self.assertRaises(exception.MultiattachSupportNotYetAvailable,
self.compute_api._attach_volume, self.context,
instance, volume, device, disk_bus, disk_type)
class ComputeAPIComputeCellUnitTestCase(Cellsv1DeprecatedTestMixIn,
_ComputeAPIUnitTestMixIn,
test.NoDBTestCase):
def setUp(self):
super(ComputeAPIComputeCellUnitTestCase, self).setUp()
self.flags(cell_type='compute', enable=True, group='cells')
self.compute_api = compute_api.API()
self.cell_type = 'compute'
def test_resize_same_flavor_passes(self):
self._test_resize(same_flavor=True)
class DiffDictTestCase(test.NoDBTestCase):
"""Unit tests for _diff_dict()."""
@@ -1,688 +0,0 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Compute w/ Cells
"""
import copy
import functools
import inspect
import ddt
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova import block_device
from nova.cells import manager
from nova.compute import api as compute_api
from nova.compute import cells_api as compute_cells_api
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova.db import api as db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova import exception
from nova import objects
from nova.objects import fields as obj_fields
from nova import quota
from nova import test
from nova.tests.unit.compute import test_compute
from nova.tests.unit.compute import test_shelve
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
ORIG_COMPUTE_API = None
FAKE_IMAGE_REF = uuids.image_ref
NODENAME = 'fakenode1'
NODENAME2 = 'fakenode2'
def stub_call_to_cells(context, instance, method, *args, **kwargs):
fn = getattr(ORIG_COMPUTE_API, method)
original_instance = kwargs.pop('original_instance', None)
if original_instance:
instance = original_instance
# Restore this in 'child cell DB'
db.instance_update(context, instance['uuid'],
dict(vm_state=instance['vm_state'],
task_state=instance['task_state']))
# Use NoopQuotaDriver in child cells.
saved_quotas = quota.QUOTAS
quota.QUOTAS = quota.QuotaEngine(
quota_driver=quota.NoopQuotaDriver())
compute_api.QUOTAS = quota.QUOTAS
try:
return fn(context, instance, *args, **kwargs)
finally:
quota.QUOTAS = saved_quotas
compute_api.QUOTAS = saved_quotas
def stub_cast_to_cells(context, instance, method, *args, **kwargs):
fn = getattr(ORIG_COMPUTE_API, method)
original_instance = kwargs.pop('original_instance', None)
if original_instance:
instance = original_instance
# Restore this in 'child cell DB'
db.instance_update(context, instance['uuid'],
dict(vm_state=instance['vm_state'],
task_state=instance['task_state']))
# Use NoopQuotaDriver in child cells.
saved_quotas = quota.QUOTAS
quota.QUOTAS = quota.QuotaEngine(
quota_driver=quota.NoopQuotaDriver())
compute_api.QUOTAS = quota.QUOTAS
try:
fn(context, instance, *args, **kwargs)
finally:
quota.QUOTAS = saved_quotas
compute_api.QUOTAS = saved_quotas
def deploy_stubs(stubs, api, original_instance=None):
call = stub_call_to_cells
cast = stub_cast_to_cells
if original_instance:
kwargs = dict(original_instance=original_instance)
call = functools.partial(stub_call_to_cells, **kwargs)
cast = functools.partial(stub_cast_to_cells, **kwargs)
stubs.Set(api, '_call_to_cells', call)
stubs.Set(api, '_cast_to_cells', cast)
@ddt.ddt
class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
    """Re-run the base compute API tests against ComputeCellsAPI.

    setUp() swaps ``self.compute_api`` for a ``ComputeCellsAPI`` wired to
    the locally-defined cells stubs.  Tests that cannot work in a cells
    environment are overridden with ``skipTest``; a few are replaced with
    cells-specific variants.
    """
    def setUp(self):
        self.flags(use_neutron=False)
        super(CellsComputeAPITestCase, self).setUp()
        # Remember the plain compute API so the module-level stubs can
        # delegate to it; restored in tearDown().
        global ORIG_COMPUTE_API
        ORIG_COMPUTE_API = self.compute_api
        self.flags(enable=True, group='cells')
        def _fake_validate_cell(*args, **kwargs):
            # Skip cell validation entirely for these tests.
            return
        self.compute_api = compute_cells_api.ComputeCellsAPI()
        self.stubs.Set(self.compute_api, '_validate_cell',
                       _fake_validate_cell)
        deploy_stubs(self.stubs, self.compute_api)
    def tearDown(self):
        # Put back the compute API saved in setUp() before base teardown.
        global ORIG_COMPUTE_API
        self.compute_api = ORIG_COMPUTE_API
        super(CellsComputeAPITestCase, self).tearDown()
    def test_instance_metadata(self):
        self.skipTest("Test is incompatible with cells.")
    def _test_evacuate(self, force=None):
        """Evacuate must strip the cell prefix from the destination host
        before delegating to the base API.
        """
        @mock.patch.object(compute_api.API, 'evacuate')
        def _test(mock_evacuate):
            instance = objects.Instance(uuid=uuids.evacuate_instance,
                                        cell_name='fake_cell_name')
            dest_host = 'fake_cell_name@fakenode2'
            self.compute_api.evacuate(self.context, instance, host=dest_host,
                                      force=force)
            mock_evacuate.assert_called_once_with(
                self.context, instance, 'fakenode2', force=force)
        _test()
    def test_error_evacuate(self):
        self.skipTest("Test is incompatible with cells.")
    def test_create_instance_sets_system_metadata(self):
        self.skipTest("Test is incompatible with cells.")
    def test_create_saves_flavor(self):
        self.skipTest("Test is incompatible with cells.")
    def test_create_instance_associates_security_groups(self):
        self.skipTest("Test is incompatible with cells.")
    @mock.patch('nova.objects.quotas.Quotas.check_deltas')
    def test_create_instance_over_quota_during_recheck(
            self, check_deltas_mock):
        """A quota race caught by the recheck must roll back every
        artifact created after the first check passed.
        """
        self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
                      self.fake_show)
        # Simulate a race where the first check passes and the recheck fails.
        fake_quotas = {'instances': 5, 'cores': 10, 'ram': 4096}
        fake_headroom = {'instances': 5, 'cores': 10, 'ram': 4096}
        fake_usages = {'instances': 5, 'cores': 10, 'ram': 4096}
        exc = exception.OverQuota(overs=['instances'], quotas=fake_quotas,
                                  headroom=fake_headroom, usages=fake_usages)
        check_deltas_mock.side_effect = [None, exc]
        inst_type = objects.Flavor.get_by_name(self.context, 'm1.small')
        # Try to create 3 instances.
        self.assertRaises(exception.QuotaError, self.compute_api.create,
            self.context, inst_type, self.fake_image['id'], min_count=3)
        project_id = self.context.project_id
        # Both the initial check and the recheck must have run.
        self.assertEqual(2, check_deltas_mock.call_count)
        call1 = mock.call(self.context,
                          {'instances': 3, 'cores': inst_type.vcpus * 3,
                           'ram': inst_type.memory_mb * 3},
                          project_id, user_id=None,
                          check_project_id=project_id, check_user_id=None)
        call2 = mock.call(self.context, {'instances': 0, 'cores': 0, 'ram': 0},
                          project_id, user_id=None,
                          check_project_id=project_id, check_user_id=None)
        check_deltas_mock.assert_has_calls([call1, call2])
        # Verify we removed the artifacts that were added after the first
        # quota check passed.
        instances = objects.InstanceList.get_all(self.context)
        self.assertEqual(0, len(instances))
        build_requests = objects.BuildRequestList.get_all(self.context)
        self.assertEqual(0, len(build_requests))
        @db_api.api_context_manager.reader
        def request_spec_get_all(context):
            # Raw query: RequestSpec has no list-all object API here.
            return context.session.query(api_models.RequestSpec).all()
        request_specs = request_spec_get_all(self.context)
        self.assertEqual(0, len(request_specs))
        instance_mappings = objects.InstanceMappingList.get_by_project_id(
            self.context, project_id)
        self.assertEqual(0, len(instance_mappings))
    @mock.patch('nova.objects.quotas.Quotas.check_deltas')
    def test_create_instance_no_quota_recheck(
            self, check_deltas_mock):
        """With recheck_quota disabled, quota is checked exactly once."""
        self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
                      self.fake_show)
        # Disable recheck_quota.
        self.flags(recheck_quota=False, group='quota')
        inst_type = objects.Flavor.get_by_name(self.context, 'm1.small')
        (refs, resv_id) = self.compute_api.create(self.context,
                                                  inst_type,
                                                  self.fake_image['id'])
        self.assertEqual(1, len(refs))
        project_id = self.context.project_id
        # check_deltas should have been called only once.
        check_deltas_mock.assert_called_once_with(self.context,
                                                  {'instances': 1,
                                                   'cores': inst_type.vcpus,
                                                   'ram': inst_type.memory_mb},
                                                  project_id, user_id=None,
                                                  check_project_id=project_id,
                                                  check_user_id=None)
    @mock.patch.object(compute_api.API, '_local_delete')
    @mock.patch.object(compute_api.API, '_lookup_instance',
                       return_value=(None, None))
    def test_delete_instance_no_cell_instance_disappear(self, mock_lookup,
                                                        mock_local_delete):
        """If the instance vanishes during delete, only the everywhere-cast
        happens; no local delete is attempted.
        """
        inst = self._create_fake_instance_obj()
        @mock.patch.object(self.compute_api.cells_rpcapi,
                           'instance_delete_everywhere')
        def test(mock_inst_del):
            self.compute_api.delete(self.context, inst)
            mock_lookup.assert_called_once_with(self.context, inst.uuid)
            mock_inst_del.assert_called_once_with(self.context, inst, 'hard')
            self.assertFalse(mock_local_delete.called)
        test()
    @mock.patch.object(compute_api.API, '_local_delete')
    def _test_delete_instance_no_cell(self, method_name, mock_local_delete):
        """Common body for the soft/hard/force delete-with-no-cell tests:
        the delete method must cast everywhere and locally delete.
        """
        cells_rpcapi = self.compute_api.cells_rpcapi
        inst = self._create_fake_instance_obj()
        # soft_delete casts with 'soft'; everything else with 'hard'.
        delete_type = method_name == 'soft_delete' and 'soft' or 'hard'
        @mock.patch.object(cells_rpcapi,
                           'instance_delete_everywhere')
        @mock.patch.object(compute_api.API, '_lookup_instance',
                           return_value=(None, inst))
        def test(mock_lookup, mock_inst_del):
            self.stub_out('nova.network.api.deallocate_for_instance',
                          lambda *a, **kw: None)
            getattr(self.compute_api, method_name)(self.context, inst)
            mock_lookup.assert_called_once_with(self.context, inst.uuid)
            mock_local_delete.assert_called_once_with(self.context, inst,
                                                      mock.ANY, method_name,
                                                      mock.ANY)
            mock_inst_del.assert_called_once_with(self.context,
                                                  inst, delete_type)
        test()
    def test_delete_instance_no_cell_constraint_failure_does_not_loop(self):
        """A persistent destroy failure must propagate, not retry forever."""
        inst = self._create_fake_instance_obj()
        inst.cell_name = None
        inst.destroy = mock.MagicMock()
        inst.destroy.side_effect = exception.ObjectActionError(action='',
                                                               reason='')
        inst.refresh = mock.MagicMock()
        @mock.patch.object(self.compute_api.cells_rpcapi,
                           'instance_delete_everywhere')
        @mock.patch.object(compute_api.API, '_lookup_instance',
                           return_value=(None, inst))
        @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
        def _test(mock_get_im, _mock_lookup_inst, _mock_delete_everywhere):
            self.assertRaises(exception.ObjectActionError,
                              self.compute_api.delete, self.context, inst)
            inst.destroy.assert_called_once_with()
            mock_get_im.assert_called_once_with(self.context, inst.uuid)
        _test()
    def test_delete_instance_no_cell_constraint_failure_corrects_itself(self):
        """If a cell name appears after the everywhere-cast, delete is
        retried through the base API instead of failing.
        """
        def add_cell_name(context, instance, delete_type):
            # Side effect for the everywhere-cast: the cell shows up.
            instance.cell_name = 'fake_cell_name'
        inst = self._create_fake_instance_obj()
        inst.cell_name = None
        inst.destroy = mock.MagicMock()
        inst.destroy.side_effect = exception.ObjectActionError(action='',
                                                               reason='')
        inst.refresh = mock.MagicMock()
        @mock.patch.object(compute_api.API, 'delete')
        @mock.patch.object(self.compute_api.cells_rpcapi,
                           'instance_delete_everywhere', side_effect=add_cell_name)
        @mock.patch.object(compute_api.API, '_lookup_instance',
                           return_value=(None, inst))
        @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
        def _test(mock_get_im, _mock_lookup_inst, mock_delete_everywhere,
                  mock_compute_delete):
            self.compute_api.delete(self.context, inst)
            inst.destroy.assert_called_once_with()
            mock_compute_delete.assert_called_once_with(self.context, inst)
            mock_get_im.assert_called_once_with(self.context, inst.uuid)
        _test()
    def test_soft_delete_instance_no_cell(self):
        self._test_delete_instance_no_cell('soft_delete')
    def test_delete_instance_no_cell(self):
        self._test_delete_instance_no_cell('delete')
    def test_force_delete_instance_no_cell(self):
        self._test_delete_instance_no_cell('force_delete')
    @mock.patch.object(compute_api.API, '_delete_while_booting',
                       side_effect=exception.ObjectActionError(
                           action='delete', reason='host now set'))
    @mock.patch.object(compute_api.API, '_local_delete')
    @mock.patch.object(compute_api.API, '_lookup_instance')
    @mock.patch.object(compute_api.API, 'delete')
    def test_delete_instance_no_cell_then_cell(self, mock_delete,
                                               mock_lookup_instance,
                                               mock_local_delete,
                                               mock_delete_while_booting):
        # This checks the case where initially an instance has no cell_name,
        # and therefore no host, set but instance.destroy fails because
        # there is now a host.
        instance = self._create_fake_instance_obj()
        instance_with_cell = copy.deepcopy(instance)
        instance_with_cell.cell_name = 'foo'
        mock_lookup_instance.return_value = None, instance_with_cell
        cells_rpcapi = self.compute_api.cells_rpcapi
        @mock.patch.object(cells_rpcapi, 'instance_delete_everywhere')
        def test(mock_inst_delete_everywhere):
            self.compute_api.delete(self.context, instance)
            mock_local_delete.assert_not_called()
            mock_delete.assert_called_once_with(self.context,
                                                instance_with_cell)
        test()
    @mock.patch.object(compute_api.API, '_delete_while_booting',
                       side_effect=exception.ObjectActionError(
                           action='delete', reason='host now set'))
    @mock.patch.object(compute_api.API, '_local_delete')
    @mock.patch.object(compute_api.API, '_lookup_instance')
    @mock.patch.object(compute_api.API, 'delete')
    def test_delete_instance_no_cell_then_no_instance(self,
            mock_delete, mock_lookup_instance, mock_local_delete,
            mock_delete_while_booting):
        # This checks the case where initially an instance has no cell_name,
        # and therefore no host, set but instance.destroy fails because
        # there is now a host. And then the instance can't be looked up.
        instance = self._create_fake_instance_obj()
        mock_lookup_instance.return_value = None, None
        cells_rpcapi = self.compute_api.cells_rpcapi
        @mock.patch.object(cells_rpcapi, 'instance_delete_everywhere')
        def test(mock_inst_delete_everywhere):
            self.compute_api.delete(self.context, instance)
            mock_local_delete.assert_not_called()
            mock_delete.assert_not_called()
        test()
    def test_get_migrations(self):
        """get_migrations must proxy to the cells RPC API unchanged."""
        filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
        migrations = {'migrations': [{'id': 1234}]}
        @mock.patch.object(self.compute_api.cells_rpcapi, 'get_migrations',
                           return_value=migrations)
        def test(mock_cell_get_migrations):
            response = self.compute_api.get_migrations(self.context,
                                                       filters)
            mock_cell_get_migrations.assert_called_once_with(self.context,
                                                             filters)
            self.assertEqual(migrations, response)
        test()
    def test_create_block_device_mapping(self):
        """BDM creation through the cells API must not write to the DB."""
        instance_type = {'swap': 1, 'ephemeral_gb': 1}
        instance = self._create_fake_instance_obj()
        bdms = [block_device.BlockDeviceDict({'source_type': 'image',
                                              'destination_type': 'local',
                                              'image_id': uuids.image,
                                              'boot_index': 0})]
        self.compute_api._create_block_device_mapping(
            instance_type, instance.uuid, bdms)
        bdms = db.block_device_mapping_get_all_by_instance(
            self.context, instance['uuid'])
        self.assertEqual(0, len(bdms))
    def test_create_bdm_from_flavor(self):
        self.skipTest("Test is incompatible with cells.")
    @mock.patch('nova.cells.messaging._TargetedMessage')
    def test_rebuild_sig(self, mock_msg):
        # TODO(belliott) Cells could benefit from better testing to ensure API
        # and manager signatures stay up to date
        def wire(version):
            # wire the rpc cast directly to the manager method to make sure
            # the signature matches
            cells_mgr = manager.CellsManager()
            def cast(context, method, *args, **kwargs):
                fn = getattr(cells_mgr, method)
                fn(context, *args, **kwargs)
            cells_mgr.cast = cast
            return cells_mgr
        cells_rpcapi = self.compute_api.cells_rpcapi
        client = cells_rpcapi.client
        with mock.patch.object(client, 'prepare', side_effect=wire):
            inst = self._create_fake_instance_obj()
            inst.cell_name = 'mycell'
            cells_rpcapi.rebuild_instance(self.context, inst, 'pass', None,
                                          None, None, None, None,
                                          recreate=False,
                                          on_shared_storage=False, host='host',
                                          preserve_ephemeral=True, kwargs=None)
        # one targeted message should have been created
        self.assertEqual(1, mock_msg.call_count)
    def test_populate_instance_for_create(self):
        self.skipTest("Removing cells v1")
    def test_multi_instance_display_name(self):
        self.skipTest("Removing cells v1")
    @ddt.data(True, False)
    def test_rdp_console(self, enabled_consoleauth):
        self.skipTest("Removing cells v1")
    @ddt.data(True, False)
    def test_spice_console(self, enabled_consoleauth):
        self.skipTest("Removing cells v1")
    @ddt.data(True, False)
    def test_vnc_console(self, enabled_consoleauth):
        self.skipTest("Removing cells v1")
class CellsShelveComputeAPITestCase(test_shelve.ShelveComputeAPITestCase):
    """Run the shelve compute API tests against ComputeCellsAPI.

    Instances are created with a cell name so shelve/unshelve route through
    the cells cast path instead of the local compute manager.
    """
    def setUp(self):
        super(CellsShelveComputeAPITestCase, self).setUp()
        # Remember the plain compute API; restored in tearDown().
        global ORIG_COMPUTE_API
        ORIG_COMPUTE_API = self.compute_api
        self.compute_api = compute_cells_api.ComputeCellsAPI()
        def _fake_validate_cell(*args, **kwargs):
            # Skip cell validation entirely for these tests.
            return
        self.stub_out('nova.compute.api.API._validate_cell',
                      _fake_validate_cell)
    def _create_fake_instance_obj(self, params=None, type_name='m1.tiny',
                                  services=False, context=None):
        """Build and persist an ACTIVE instance with a child cell name.

        ``params`` may override any instance field and, via its 'flavor'
        key, flavor fields; 'system_metadata' seeds the system metadata.
        """
        flavor = objects.Flavor.get_by_name(self.context, type_name)
        inst = objects.Instance(context=context or self.context)
        inst.cell_name = 'api!child'
        inst.vm_state = vm_states.ACTIVE
        inst.task_state = None
        inst.power_state = power_state.RUNNING
        inst.image_ref = FAKE_IMAGE_REF
        inst.reservation_id = 'r-fakeres'
        inst.user_id = self.user_id
        inst.project_id = self.project_id
        inst.host = self.compute.host
        inst.node = NODENAME
        inst.instance_type_id = flavor.id
        inst.ami_launch_index = 0
        inst.memory_mb = 0
        inst.vcpus = 0
        inst.root_gb = 0
        inst.ephemeral_gb = 0
        inst.architecture = obj_fields.Architecture.X86_64
        inst.os_type = 'Linux'
        inst.system_metadata = (
            params and params.get('system_metadata', {}) or {})
        inst.locked = False
        inst.created_at = timeutils.utcnow()
        inst.updated_at = timeutils.utcnow()
        inst.launched_at = timeutils.utcnow()
        inst.security_groups = objects.SecurityGroupList(objects=[])
        inst.flavor = flavor
        inst.old_flavor = None
        inst.new_flavor = None
        if params:
            inst.flavor.update(params.pop('flavor', {}))
            inst.update(params)
        inst.create()
        return inst
    def _test_shelve(self, vm_state=vm_states.ACTIVE,
                     boot_from_volume=False, clean_shutdown=True):
        """Shelve must be cast to the cell rather than run locally."""
        params = dict(task_state=None, vm_state=vm_state,
                      display_name='fake-name')
        instance = self._create_fake_instance_obj(params=params)
        with mock.patch.object(self.compute_api,
                               '_cast_to_cells') as cast_to_cells:
            self.compute_api.shelve(self.context, instance,
                                    clean_shutdown=clean_shutdown)
            cast_to_cells.assert_called_once_with(self.context,
                                                  instance, 'shelve',
                                                  clean_shutdown=clean_shutdown
                                                  )
    def test_unshelve(self):
        # Ensure instance can be unshelved on cell environment.
        # The super class tests nova-shelve.
        instance = self._create_fake_instance_obj()
        self.assertIsNone(instance['task_state'])
        self.compute_api.shelve(self.context, instance)
        instance.task_state = None
        instance.vm_state = vm_states.SHELVED
        instance.save()
        with mock.patch.object(self.compute_api,
                               '_cast_to_cells') as cast_to_cells:
            self.compute_api.unshelve(self.context, instance)
            cast_to_cells.assert_called_once_with(self.context,
                                                  instance, 'unshelve')
    def tearDown(self):
        # Put back the compute API saved in setUp() before base teardown.
        global ORIG_COMPUTE_API
        self.compute_api = ORIG_COMPUTE_API
        super(CellsShelveComputeAPITestCase, self).tearDown()
class CellsConductorAPIRPCRedirect(test.NoDBTestCase):
    """Verify compute-task operations are redirected to the cells RPC API.

    Each test_* method (except test_check_equal) corresponds to an entry in
    the task API's ``cells_compatible`` list; test_check_equal enforces
    that correspondence by introspecting the test method names.
    """
    def setUp(self):
        super(CellsConductorAPIRPCRedirect, self).setUp()
        self.compute_api = compute_cells_api.ComputeCellsAPI()
        # Replace the task API's cells RPC client so calls can be asserted.
        self.cells_rpcapi = mock.MagicMock()
        self.compute_api.compute_task_api.cells_rpcapi = self.cells_rpcapi
        self.context = context.RequestContext('fake', 'fake')
    @mock.patch.object(compute_api.API, '_record_action_start')
    @mock.patch.object(compute_api.API, '_provision_instances')
    @mock.patch.object(compute_api.API, '_check_and_transform_bdm')
    @mock.patch.object(compute_api.API, '_get_image')
    @mock.patch.object(compute_api.API, '_validate_and_build_base_options')
    @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
    def test_build_instances(self, _checks_for_create_and_rebuild,
                             _validate, _get_image, _check_bdm,
                             _provision, _record_action_start):
        """create() must route through schedule_and_build_instances."""
        _get_image.return_value = (None, 'fake-image')
        _validate.return_value = ({}, 1, None, ['default'], None)
        _check_bdm.return_value = objects.BlockDeviceMappingList()
        _provision.return_value = []
        with mock.patch.object(self.compute_api.compute_task_api,
                               'schedule_and_build_instances') as sbi:
            self.compute_api.create(self.context, 'fake-flavor', 'fake-image')
            # Subsequent tests in class are verifying the hooking. We
            # don't check args since this is verified in compute test
            # code.
            self.assertTrue(sbi.called)
    @mock.patch.object(compute_api.API, '_validate_flavor_image_nostatus')
    @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
    @mock.patch.object(compute_api.API, '_record_action_start')
    @mock.patch.object(compute_api.API, '_resize_cells_support')
    @mock.patch.object(compute_utils, 'upsize_quota_delta')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(flavors, 'extract_flavor')
    @mock.patch.object(compute_api.API, '_check_auto_disk_config')
    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
    def test_resize_instance(self, _bdms, _check, _extract, _save, _upsize,
                             _cells, _record, _spec_get_by_uuid,
                             mock_validate):
        """resize() must be redirected to the cells RPC API."""
        flavor = objects.Flavor(**test_flavor.fake_flavor)
        _extract.return_value = flavor
        orig_system_metadata = {}
        instance = fake_instance.fake_instance_obj(self.context,
                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
                launched_at=timeutils.utcnow(),
                system_metadata=orig_system_metadata,
                expected_attrs=['system_metadata'])
        instance.flavor = flavor
        instance.old_flavor = instance.new_flavor = None
        self.compute_api.resize(self.context, instance)
        self.assertTrue(self.cells_rpcapi.resize_instance.called)
    @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
    @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
    @mock.patch.object(compute_api.API, '_record_action_start')
    @mock.patch.object(objects.Instance, 'save')
    def test_live_migrate_instance(self, instance_save, _record, _get_spec,
                                   mock_nodelist):
        """live_migrate() must be redirected to the cells RPC API."""
        orig_system_metadata = {}
        instance = fake_instance.fake_instance_obj(self.context,
                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
                launched_at=timeutils.utcnow(),
                system_metadata=orig_system_metadata,
                expected_attrs=['system_metadata'])
        self.compute_api.live_migrate(self.context, instance,
                True, True, 'fake_dest_host')
        self.assertTrue(self.cells_rpcapi.live_migrate_instance.called)
    @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(objects.Instance, 'get_flavor')
    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
    @mock.patch.object(compute_api.API, '_get_image')
    @mock.patch.object(compute_api.API, '_check_auto_disk_config')
    @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
    @mock.patch.object(compute_api.API, '_record_action_start')
    def test_rebuild_instance(self, _record_action_start,
        _checks_for_create_and_rebuild, _check_auto_disk_config,
        _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save,
        _req_spec_get_by_inst_uuid):
        """rebuild() must be redirected to the cells RPC API."""
        orig_system_metadata = {}
        instance = fake_instance.fake_instance_obj(self.context,
                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
                launched_at=timeutils.utcnow(), image_ref=uuids.image_id,
                system_metadata=orig_system_metadata,
                expected_attrs=['system_metadata'])
        get_flavor.return_value = {}
        # The API request schema validates that a UUID is passed for the
        # imageRef parameter so we need to provide an image.
        image_href = uuids.image_id
        image = {"min_ram": 10, "min_disk": 1,
                 "properties": {'architecture': 'x86_64'},
                 "id": uuids.image_id}
        admin_pass = ''
        files_to_inject = []
        bdms = objects.BlockDeviceMappingList()
        _get_image.return_value = (None, image)
        bdm_get_by_instance_uuid.return_value = bdms
        self.compute_api.rebuild(self.context, instance, image_href,
                                 admin_pass, files_to_inject)
        self.assertTrue(self.cells_rpcapi.rebuild_instance.called)
    def test_check_equal(self):
        """The test_* methods here must exactly mirror the task API's
        ``cells_compatible`` list, so neither can drift silently.
        """
        task_api = self.compute_api.compute_task_api
        tests = set()
        for (name, value) in inspect.getmembers(self, inspect.ismethod):
            if name.startswith('test_') and name != 'test_check_equal':
                tests.add(name[5:])
        if tests != set(task_api.cells_compatible):
            self.fail("Testcases not equivalent to cells_compatible list")
-191
View File
@@ -14,16 +14,11 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import fixtures
import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import testtools
from nova.api.openstack.compute import services
from nova.cells import utils as cells_utils
from nova import compute
from nova.compute import api as compute_api
from nova import context
@@ -563,192 +558,6 @@ class ComputeHostAPITestCase(test.TestCase):
[mock.call(self.ctxt, uuids.cn_uuid)] * 2)
class ComputeHostAPICellsTestCase(ComputeHostAPITestCase):
    """Re-run the host API tests with cells enabled (api cell type).

    Service/host lookups are expected to go through the cells RPC API, so
    most overrides here assert against ``self.host_api.cells_rpcapi``;
    tests that have no cells-v1 equivalent are skipped.
    """
    def setUp(self):
        self.flags(enable=True, group='cells')
        self.flags(cell_type='api', group='cells')
        super(ComputeHostAPICellsTestCase, self).setUp()
    @testtools.skip('cellsv1 does not use this')
    def test_service_get_all_cells(self):
        pass
    @testtools.skip('cellsv1 does not use this')
    def test_service_get_all_cells_with_failures(self):
        pass
    @testtools.skip('cellsv1 does not use this')
    def test_service_get_all_cells_with_minimal_constructs(self):
        pass
    @testtools.skip('cellsv1 does not use this')
    def test_service_delete_ambiguous(self):
        pass
    def test_service_get_all_no_zones(self):
        """Without zone filtering, proxied services pass through as-is."""
        services = [
            cells_utils.ServiceProxy(
                objects.Service(id=1, topic='compute', host='host1'),
                'cell1'),
            cells_utils.ServiceProxy(
                objects.Service(id=2, topic='compute', host='host2'),
                'cell1')]
        fake_filters = {'host': 'host1'}
        @mock.patch.object(self.host_api.cells_rpcapi, 'service_get_all')
        def _do_test(mock_service_get_all):
            mock_service_get_all.return_value = services
            result = self.host_api.service_get_all(self.ctxt,
                                                   filters=fake_filters)
            self.assertEqual(services, result)
        _do_test()
    def _test_service_get_all(self, fake_filters, **kwargs):
        """Common body: each proxied service gains an availability_zone."""
        service_attrs = dict(test_service.fake_service)
        # ServiceProxy objects don't carry a version field.
        del service_attrs['version']
        services = [
            cells_utils.ServiceProxy(
                objects.Service(**dict(service_attrs, id=1,
                                       topic='compute', host='host1')),
                'cell1'),
            cells_utils.ServiceProxy(
                objects.Service(**dict(service_attrs, id=2,
                                       topic='compute', host='host2')),
                'cell1')]
        exp_services = []
        for service in services:
            exp_service = copy.copy(service)
            exp_service.update({'availability_zone': 'nova'})
            exp_services.append(exp_service)
        @mock.patch.object(self.host_api.cells_rpcapi, 'service_get_all')
        def _do_test(mock_service_get_all):
            mock_service_get_all.return_value = services
            result = self.host_api.service_get_all(self.ctxt,
                                                   filters=fake_filters,
                                                   **kwargs)
            mock_service_get_all.assert_called_once_with(self.ctxt,
                                                         filters=fake_filters)
            self.assertEqual(jsonutils.to_primitive(exp_services),
                             jsonutils.to_primitive(result))
        _do_test()
    def test_service_get_all(self):
        fake_filters = {'availability_zone': 'nova'}
        self._test_service_get_all(fake_filters)
    def test_service_get_all_set_zones(self):
        fake_filters = {'key1': 'val1'}
        self._test_service_get_all(fake_filters, set_zones=True)
    def test_service_get_by_compute_host(self):
        """Lookup by compute host must proxy to the cells RPC API."""
        obj = objects.Service(id=1, host='fake')
        fake_service = cells_utils.ServiceProxy(obj, 'cell1')
        @mock.patch.object(self.host_api.cells_rpcapi,
                           'service_get_by_compute_host')
        def _do_test(mock_service_get_by_compute_host):
            mock_service_get_by_compute_host.return_value = fake_service
            result = self.host_api.service_get_by_compute_host(self.ctxt,
                                                               'fake-host')
            self.assertEqual(fake_service, result)
        _do_test()
    def test_service_update(self):
        """Service updates must proxy to the cells RPC API."""
        host_name = 'fake-host'
        binary = 'nova-compute'
        params_to_update = dict(disabled=True)
        obj = objects.Service(id=42, host='fake')
        fake_service = cells_utils.ServiceProxy(obj, 'cell1')
        @mock.patch.object(self.host_api.cells_rpcapi, 'service_update')
        def _do_test(mock_service_update):
            mock_service_update.return_value = fake_service
            result = self.host_api.service_update(
                self.ctxt, host_name, binary, params_to_update)
            self.assertEqual(fake_service, result)
        _do_test()
    def test_service_delete(self):
        """Deletes take a cell-qualified service id straight through."""
        cell_service_id = cells_utils.cell_with_item('cell1', 1)
        with mock.patch.object(self.host_api.cells_rpcapi,
                               'service_delete') as service_delete:
            self.host_api.service_delete(self.ctxt, cell_service_id)
            service_delete.assert_called_once_with(
                self.ctxt, cell_service_id)
    @testtools.skip('cells do not support host aggregates')
    def test_service_delete_compute_in_aggregate(self):
        # this test is not valid for cell
        pass
    @mock.patch.object(objects.InstanceList, 'get_by_host')
    def test_instance_get_all_by_host(self, mock_get):
        """Results must be filtered down to the requested cell."""
        instances = [dict(id=1, cell_name='cell1', host='host1'),
                     dict(id=2, cell_name='cell2', host='host1'),
                     dict(id=3, cell_name='cell1', host='host2')]
        mock_get.return_value = instances
        expected_result = [instances[0], instances[2]]
        cell_and_host = cells_utils.cell_with_item('cell1', 'fake-host')
        result = self.host_api.instance_get_all_by_host(self.ctxt,
                                                        cell_and_host)
        self.assertEqual(expected_result, result)
    def test_task_log_get_all(self):
        @mock.patch.object(self.host_api.cells_rpcapi, 'task_log_get_all',
                           return_value='fake-response')
        def _do_test(mock_task_log_get_all):
            result = self.host_api.task_log_get_all(self.ctxt, 'fake-name',
                                                    'fake-begin', 'fake-end',
                                                    host='fake-host',
                                                    state='fake-state')
            self.assertEqual('fake-response', result)
        _do_test()
    def test_get_host_uptime_service_down(self):
        # The corresponding Compute test case depends on the
        # _assert_host_exists which is a no-op in the cells api
        pass
    def test_get_host_uptime(self):
        @mock.patch.object(self.host_api.cells_rpcapi, 'get_host_uptime',
                           return_value='fake-response')
        def _do_test(mock_get_host_uptime):
            result = self.host_api.get_host_uptime(self.ctxt, 'fake-host')
            self.assertEqual('fake-response', result)
        _do_test()
    def test_compute_node_statistics(self):
        # Not implementing cross-cellsv2 for cellsv1
        pass
    def test_compute_node_get_using_uuid(self):
        """compute_node_get must accept a cell-qualified UUID."""
        cell_compute_uuid = cells_utils.cell_with_item('cell1', uuids.cn_uuid)
        with mock.patch.object(self.host_api.cells_rpcapi,
                               'compute_node_get') as compute_node_get:
            self.host_api.compute_node_get(self.ctxt, cell_compute_uuid)
        compute_node_get.assert_called_once_with(self.ctxt, cell_compute_uuid)
    def test_compute_node_get_not_found(self):
        """Routing errors are translated to ComputeHostNotFound."""
        cell_compute_uuid = cells_utils.cell_with_item('cell1', uuids.cn_uuid)
        with mock.patch.object(self.host_api.cells_rpcapi, 'compute_node_get',
                               side_effect=exception.CellRoutingInconsistency(
                                   reason='because_cells_v1')):
            self.assertRaises(exception.ComputeHostNotFound,
                              self.host_api.compute_node_get,
                              self.ctxt, cell_compute_uuid)
class ComputeAggregateAPITestCase(test.TestCase):
def setUp(self):
super(ComputeAggregateAPITestCase, self).setUp()
-2
View File
@@ -284,8 +284,6 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
self.fake_policy = jsonutils.loads(fake_policy.policy_data)
self.admin_only_rules = (
"cells_scheduler_filter:DifferentCellFilter",
"cells_scheduler_filter:TargetCellFilter",
"network:attach_external_network",
"os_compute_api:servers:create:forced_host",
"os_compute_api:servers:detail:get_all_tenants",
-2
View File
@@ -48,8 +48,6 @@ class TestProfiler(test.NoDBTestCase):
classes = [
'nova.api.manager.MetadataManager',
'nova.cells.manager.CellsManager',
'nova.cells.rpcapi.CellsAPI',
'nova.compute.api.API',
'nova.compute.manager.ComputeManager',
'nova.compute.rpcapi.ComputeAPI',