blueprint host-aggregates
This is the first of a series of commits that add the host-aggregates capability, as described on the blueprint page. This commit, more precisely, introduces changes to the Nova model: model classes related to aggregates have been added, as well as DB API methods to interact with the model; a sqlalchemy migration script plus a bunch of tests are also part of this changeset. Commits that will follow are going to add: - Extensions to OSAPI Admin, and related python_novaclient mappings - Implementation of the XenAPI virt layer - Integration of OSAPI and virt layer, via the compute_api - smoketests - openstack-manuals documentation These commits will be pushed for review not necessarily in this exact order. Change-Id: Iceb27609dc53bf4305c02d7cbc436fba4c4a7256
This commit is contained in:
@@ -0,0 +1,28 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Possible states for host aggregates.
|
||||
|
||||
An aggregate may be 'building', in which case the admin has triggered its
|
||||
creation, but the underlying hypervisor pool has not actually being created
|
||||
yet. An aggregate may be 'active', in which case the underlying hypervisor
|
||||
pool is up and running. An aggregate may be in 'error' in all other cases.
|
||||
"""
|
||||
|
||||
BUILDING = 'building'
|
||||
ACTIVE = 'active'
|
||||
ERROR = 'error'
|
||||
@@ -1741,6 +1741,65 @@ def sm_volume_get_all(context):
|
||||
####################
|
||||
|
||||
|
||||
def aggregate_create(context, values, metadata=None):
    """Create a new aggregate with metadata.

    :param values: mapping of aggregate attributes (name,
                   availability_zone, ...) passed to the backend.
    :param metadata: optional dict of key/value pairs to attach.
    """
    return IMPL.aggregate_create(context, values, metadata)
|
||||
|
||||
|
||||
def aggregate_get(context, aggregate_id, read_deleted='no'):
    """Get a specific aggregate by id.

    Raises AggregateNotFound if no matching aggregate exists.
    """
    return IMPL.aggregate_get(context, aggregate_id, read_deleted)
|
||||
|
||||
|
||||
def aggregate_update(context, aggregate_id, values):
    """Update the attributes of an aggregate.

    If `values` contains a 'metadata' key, the aggregate metadata is
    updated too. Raises AggregateNotFound for unknown ids.
    """
    return IMPL.aggregate_update(context, aggregate_id, values)
|
||||
|
||||
|
||||
def aggregate_delete(context, aggregate_id):
    """Delete an aggregate.

    Raises AggregateNotFound if no matching aggregate exists.
    """
    return IMPL.aggregate_delete(context, aggregate_id)
|
||||
|
||||
|
||||
def aggregate_get_all(context, read_deleted='yes'):
    """Get all aggregates.

    By default (read_deleted='yes') soft-deleted rows are included.
    """
    return IMPL.aggregate_get_all(context, read_deleted)
|
||||
|
||||
|
||||
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
    """Add or update metadata for the aggregate.

    If set_delete=True, existing keys that are absent from `metadata`
    are deleted, so `metadata` fully replaces the current set.

    Returns the metadata dict, as produced by the backend.
    """
    # The backend returns the metadata; propagate it instead of
    # silently discarding the result.
    return IMPL.aggregate_metadata_add(context, aggregate_id, metadata,
                                       set_delete)
|
||||
|
||||
|
||||
def aggregate_metadata_get(context, aggregate_id, read_deleted='no'):
    """Get metadata for the specified aggregate.

    Returns a plain {key: value} dict.
    """
    return IMPL.aggregate_metadata_get(context, aggregate_id, read_deleted)
|
||||
|
||||
|
||||
def aggregate_metadata_delete(context, aggregate_id, key):
    """Delete the given metadata key.

    Raises AggregateMetadataNotFound if the key is not present.
    """
    IMPL.aggregate_metadata_delete(context, aggregate_id, key)
|
||||
|
||||
|
||||
def aggregate_host_add(context, aggregate_id, host):
    """Add host to the aggregate.

    Returns the created host entry, as produced by the backend.
    Raises AggregateHostExists/AggregateHostConflict on duplicates.
    """
    # The backend returns the new host reference; propagate it instead
    # of silently discarding the result.
    return IMPL.aggregate_host_add(context, aggregate_id, host)
|
||||
|
||||
|
||||
def aggregate_host_get_all(context, aggregate_id, read_deleted='yes'):
    """Get hosts for the specified aggregate.

    Returns a list of host names.
    """
    return IMPL.aggregate_host_get_all(context, aggregate_id, read_deleted)
|
||||
|
||||
|
||||
def aggregate_host_delete(context, aggregate_id, host):
    """Delete the given host from the aggregate.

    Raises AggregateHostNotFound if the host is not a member.
    """
    IMPL.aggregate_host_delete(context, aggregate_id, host)
|
||||
|
||||
|
||||
####################
|
||||
|
||||
|
||||
def instance_fault_create(context, values):
    """Create a new Instance Fault.

    :param values: mapping of fault attributes passed to the backend.
    """
    return IMPL.instance_fault_create(context, values)
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
"""Implementation of SQLAlchemy backend."""
|
||||
|
||||
import datetime
|
||||
import functools
|
||||
import re
|
||||
import warnings
|
||||
|
||||
@@ -29,6 +30,7 @@ from nova import exception
|
||||
from nova import flags
|
||||
from nova import utils
|
||||
from nova import log as logging
|
||||
from nova.compute import aggregate_states
|
||||
from nova.compute import vm_states
|
||||
from nova.db.sqlalchemy import models
|
||||
from nova.db.sqlalchemy.session import get_session
|
||||
@@ -143,6 +145,20 @@ def require_volume_exists(f):
|
||||
return wrapper
|
||||
|
||||
|
||||
def require_aggregate_exists(f):
    """Decorator to require the specified aggregate to exist.

    Requires the wrapped function to use context and aggregate_id as
    their first two arguments.
    """

    @functools.wraps(f)
    def wrapper(context, aggregate_id, *args, **kwargs):
        # Existence probe: raises AggregateNotFound for unknown ids.
        # NOTE(review): relies on a module-level `db` import that is
        # not visible in this hunk — confirm it is in scope.
        db.aggregate_get(context, aggregate_id)
        return f(context, aggregate_id, *args, **kwargs)
    return wrapper
|
||||
|
||||
|
||||
def model_query(context, *args, **kwargs):
|
||||
"""Query helper that accounts for context's `read_deleted` field.
|
||||
|
||||
@@ -3953,6 +3969,218 @@ def sm_volume_get_all(context):
|
||||
################
|
||||
|
||||
|
||||
def _aggregate_get_query(context, model_class, id_field, id,
                         session=None, read_deleted='yes'):
    """Build a query over `model_class` filtered on `id_field` == `id`."""
    query = model_query(context, model_class, session=session,
                        read_deleted=read_deleted)
    return query.filter(id_field == id)
|
||||
|
||||
|
||||
@require_admin_context
def aggregate_create(context, values, metadata=None):
    """Create an aggregate (and optional metadata).

    New aggregates always start out in the BUILDING state.
    """
    try:
        aggregate = models.Aggregate()
        aggregate.update(values)
        aggregate.operational_state = aggregate_states.BUILDING
        aggregate.save()
    except exception.DBError:
        # NOTE(review): every DBError is mapped to a duplicate-name
        # error (the `name` column is unique); other integrity
        # failures would be mis-reported here.
        raise exception.AggregateNameExists(aggregate_name=values['name'])
    if metadata:
        aggregate_metadata_add(context, aggregate.id, metadata)
    return aggregate
|
||||
|
||||
|
||||
@require_admin_context
def aggregate_get(context, aggregate_id, read_deleted='no'):
    """Return the aggregate with the given id, or raise AggregateNotFound."""
    result = _aggregate_get_query(context,
                                  models.Aggregate,
                                  models.Aggregate.id,
                                  aggregate_id,
                                  read_deleted=read_deleted).first()
    if result is None:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)
    return result
|
||||
|
||||
|
||||
@require_admin_context
def aggregate_update(context, aggregate_id, values):
    """Update an aggregate's attributes (and metadata, if supplied).

    If `values` carries a 'metadata' key, that dict replaces the
    aggregate's metadata (set_delete=True) before the remaining
    attributes are saved. Raises AggregateNotFound for unknown ids.
    """
    session = get_session()
    aggregate = _aggregate_get_query(context,
                                     models.Aggregate,
                                     models.Aggregate.id, aggregate_id,
                                     session=session,
                                     read_deleted='no').first()
    if aggregate:
        metadata = values.get('metadata')
        if metadata is not None:
            # pop() keeps 'metadata' out of the attribute update below.
            aggregate_metadata_add(context,
                                   aggregate_id,
                                   values.pop('metadata'),
                                   set_delete=True)
        with session.begin():
            aggregate.update(values)
            aggregate.save(session=session)
        # Put the popped key back so the caller's dict keeps its shape.
        # NOTE(review): when `values` had no 'metadata' key this inserts
        # metadata=None into the caller's dict — confirm intended.
        values['metadata'] = metadata
        return aggregate
    else:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)
|
||||
|
||||
|
||||
@require_admin_context
def aggregate_delete(context, aggregate_id):
    """Soft-delete an aggregate, or raise AggregateNotFound."""
    query = _aggregate_get_query(context,
                                 models.Aggregate,
                                 models.Aggregate.id, aggregate_id,
                                 read_deleted='no')
    if query.first():
        # Soft delete: flag the row and stamp deleted_at, assigning
        # updated_at to itself so that column is left untouched.
        query.update({'deleted': True,
                      'deleted_at': utils.utcnow(),
                      'updated_at': literal_column('updated_at')})
    else:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)
|
||||
|
||||
|
||||
@require_admin_context
def aggregate_get_all(context, read_deleted='yes'):
    """Return every aggregate; soft-deleted rows included by default."""
    query = model_query(context,
                        models.Aggregate,
                        read_deleted=read_deleted)
    return query.all()
|
||||
|
||||
|
||||
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_get(context, aggregate_id, read_deleted='no'):
    """Return the aggregate's metadata as a plain {key: value} dict."""
    rows = model_query(context,
                       models.AggregateMetadata,
                       read_deleted=read_deleted).\
                       filter_by(aggregate_id=aggregate_id).all()

    metadata = {}
    for row in rows:
        metadata[row['key']] = row['value']
    return metadata
|
||||
|
||||
|
||||
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_delete(context, aggregate_id, key):
    """Soft-delete one metadata key, or raise AggregateMetadataNotFound."""
    query = _aggregate_get_query(context,
                                 models.AggregateMetadata,
                                 models.AggregateMetadata.aggregate_id,
                                 aggregate_id, read_deleted='no').\
                                 filter_by(key=key)
    if query.first():
        # Soft delete: flag the row and stamp deleted_at, assigning
        # updated_at to itself so that column is left untouched.
        query.update({'deleted': True,
                      'deleted_at': utils.utcnow(),
                      'updated_at': literal_column('updated_at')})
    else:
        raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
                                                  metadata_key=key)
|
||||
|
||||
|
||||
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_get_item(context, aggregate_id, key,
                                session=None, read_deleted='yes'):
    """Return the metadata row for one key (soft-deleted rows included).

    Raises AggregateMetadataNotFound when no row exists for the key.
    """
    result = _aggregate_get_query(context,
                                  models.AggregateMetadata,
                                  models.AggregateMetadata.aggregate_id,
                                  aggregate_id, session=session,
                                  read_deleted=read_deleted).\
                                  filter_by(key=key).first()

    if not result:
        raise exception.AggregateMetadataNotFound(metadata_key=key,
                                                 aggregate_id=aggregate_id)

    return result
|
||||
|
||||
|
||||
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
    """Add or update metadata for an aggregate; returns `metadata`.

    With set_delete=True, existing keys that are absent from `metadata`
    are soft-deleted first, so `metadata` replaces the whole set.
    """
    session = get_session()

    if set_delete:
        original_metadata = aggregate_metadata_get(context, aggregate_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                # Key no longer wanted: flag its row deleted.
                meta_ref = aggregate_metadata_get_item(context, aggregate_id,
                                                      meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    meta_ref = None

    for meta_key, meta_value in metadata.iteritems():
        item = {"value": meta_value}
        try:
            # Reuse the existing row for this key, undeleting it if it
            # was previously soft-deleted (updated_at left untouched).
            meta_ref = aggregate_metadata_get_item(context, aggregate_id,
                                                  meta_key, session)
            if meta_ref.deleted:
                item.update({'deleted': False, 'deleted_at': None,
                             'updated_at': literal_column('updated_at')})
        except exception.AggregateMetadataNotFound:
            # No row yet: create one keyed to this aggregate.
            meta_ref = models.AggregateMetadata()
            item.update({"key": meta_key, "aggregate_id": aggregate_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata
|
||||
|
||||
|
||||
@require_admin_context
@require_aggregate_exists
def aggregate_host_get_all(context, aggregate_id, read_deleted='yes'):
    """Return the host names that belong to the given aggregate."""
    query = model_query(context,
                        models.AggregateHost,
                        read_deleted=read_deleted)
    rows = query.filter_by(aggregate_id=aggregate_id).all()
    return [row.host for row in rows]
|
||||
|
||||
|
||||
@require_admin_context
@require_aggregate_exists
def aggregate_host_delete(context, aggregate_id, host):
    """Soft-delete a host from the aggregate, or raise AggregateHostNotFound."""
    query = _aggregate_get_query(context,
                                 models.AggregateHost,
                                 models.AggregateHost.aggregate_id,
                                 aggregate_id,
                                 read_deleted='no').filter_by(host=host)
    if query.first():
        # Soft delete: flag the row and stamp deleted_at, assigning
        # updated_at to itself so that column is left untouched.
        query.update({'deleted': True,
                      'deleted_at': utils.utcnow(),
                      'updated_at': literal_column('updated_at')})
    else:
        raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
                                              host=host)
|
||||
|
||||
|
||||
@require_admin_context
@require_aggregate_exists
def aggregate_host_add(context, aggregate_id, host):
    """Add a host to an aggregate and return the new row.

    Raises AggregateHostExists when the host is already a member of
    this aggregate, and AggregateHostConflict when the insert fails
    (the `host` column is unique, so a host can belong to at most one
    aggregate).
    """
    host_ref = _aggregate_get_query(context,
                                    models.AggregateHost,
                                    models.AggregateHost.aggregate_id,
                                    aggregate_id,
                                    read_deleted='no').\
                                    filter_by(host=host).first()
    if not host_ref:
        try:
            host_ref = models.AggregateHost()
            values = {"host": host, "aggregate_id": aggregate_id, }
            host_ref.update(values)
            host_ref.save()
        except exception.DBError:
            # Unique constraint on `host`: it belongs to another aggregate.
            raise exception.AggregateHostConflict(host=host)
    else:
        raise exception.AggregateHostExists(host=host,
                                            aggregate_id=aggregate_id)
    return host_ref
|
||||
|
||||
|
||||
################
|
||||
|
||||
|
||||
def instance_fault_create(context, values):
|
||||
"""Create a new InstanceFault."""
|
||||
fault_ref = models.InstanceFault()
|
||||
|
||||
@@ -0,0 +1,97 @@
|
||||
# Copyright (c) 2011 Citrix Systems, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sqlalchemy import Boolean, String, DateTime, Integer
|
||||
from sqlalchemy import MetaData, Column, ForeignKey, Table
|
||||
|
||||
from nova import log as logging
|
||||
|
||||
|
||||
meta = MetaData()


# Host aggregates: a named grouping of hosts inside an availability zone.
# `name` is unique; `operational_state` tracks the aggregate lifecycle.
aggregates = Table('aggregates', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(),
               primary_key=True, nullable=False, autoincrement=True),
        Column('name',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False),
               unique=True),
        Column('operational_state',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False),
               nullable=False),
        Column('availability_zone',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False),
               nullable=False),
        )

# Aggregate membership. `host` is unique across the whole table, so a
# host can belong to at most one aggregate.
hosts = Table('aggregate_hosts', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('host',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False),
               unique=True),
        Column('aggregate_id', Integer(), ForeignKey('aggregates.id'),
               nullable=False),
        )

# Free-form key/value pairs attached to an aggregate.
metadata = Table('aggregate_metadata', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('aggregate_id',
               Integer(),
               ForeignKey('aggregates.id'),
               nullable=False),
        Column('key',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False),
               nullable=False),
        Column('value',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False),
               nullable=False))


# Creation order matters: `aggregates` must exist before the tables
# that reference it via foreign key.
tables = (aggregates, hosts, metadata)
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
    """Create the aggregate tables; failures are logged, not raised."""
    meta.bind = migrate_engine
    for table in tables:
        try:
            table.create()
        except Exception:
            # Best-effort: log and keep going with the remaining tables.
            logging.exception(repr(table))
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
    """Drop the aggregate tables; failures are logged, not raised."""
    meta.bind = migrate_engine
    for table in tables:
        try:
            table.drop()
        except Exception:
            # Best-effort: log and keep going with the remaining tables.
            logging.exception(repr(table))
|
||||
@@ -849,6 +849,42 @@ class Zone(BASE, NovaBase):
|
||||
weight_scale = Column(Float(), default=1.0)
|
||||
|
||||
|
||||
class Aggregate(BASE, NovaBase):
    """Represents a cluster of hosts that exists in this zone."""
    __tablename__ = 'aggregates'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Unique, human-readable aggregate name.
    name = Column(String(255), unique=True)
    # Lifecycle state (see nova.compute.aggregate_states).
    operational_state = Column(String(255), nullable=False)
    availability_zone = Column(String(255), nullable=False)
|
||||
|
||||
|
||||
class AggregateHost(BASE, NovaBase):
    """Represents a host that is member of an aggregate."""
    __tablename__ = 'aggregate_hosts'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Unique across the table: a host may join at most one aggregate.
    host = Column(String(255), unique=True)
    aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
    # Join excludes soft-deleted membership rows.
    # NOTE(review): the backref name 'aggregates' exposes this host list
    # as Aggregate.aggregates — 'hosts' may have been intended; confirm.
    aggregate = relationship(Aggregate, backref=backref('aggregates'),
                             foreign_keys=aggregate_id,
                             primaryjoin='and_('
                             'AggregateHost.aggregate_id == Aggregate.id,'
                             'AggregateHost.deleted == False)')
|
||||
|
||||
|
||||
class AggregateMetadata(BASE, NovaBase):
    """Represents a metadata key/value pair for an aggregate."""
    __tablename__ = 'aggregate_metadata'
    id = Column(Integer, primary_key=True)
    key = Column(String(255), nullable=False)
    value = Column(String(255), nullable=False)
    aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
    # Join excludes soft-deleted metadata rows; reachable from the
    # aggregate side as Aggregate.metadata.
    aggregate = relationship(Aggregate, backref="metadata",
                             foreign_keys=aggregate_id,
                             primaryjoin='and_('
                             'AggregateMetadata.aggregate_id == Aggregate.id,'
                             'AggregateMetadata.deleted == False)')
|
||||
|
||||
|
||||
class AgentBuild(BASE, NovaBase):
|
||||
"""Represents an agent build."""
|
||||
__tablename__ = 'agent_builds'
|
||||
|
||||
@@ -884,3 +884,28 @@ class WillNotSchedule(NovaException):
|
||||
class QuotaError(ApiError):
|
||||
"""Quota Exceeded."""
|
||||
pass
|
||||
|
||||
|
||||
class AggregateNotFound(NotFound):
    """No aggregate matches the given id."""
    message = _("Aggregate %(aggregate_id)s could not be found.")
|
||||
|
||||
|
||||
class AggregateNameExists(Duplicate):
    """An aggregate with the given name already exists (names are unique)."""
    message = _("Aggregate %(aggregate_name)s already exists.")
|
||||
|
||||
|
||||
class AggregateHostNotFound(NotFound):
    """The host is not a member of the given aggregate."""
    message = _("Aggregate %(aggregate_id)s has no host %(host)s.")
|
||||
|
||||
|
||||
class AggregateMetadataNotFound(NotFound):
    """The aggregate has no metadata entry for the given key."""
    message = _("Aggregate %(aggregate_id)s has no metadata with "
                "key %(metadata_key)s.")
|
||||
|
||||
|
||||
class AggregateHostConflict(Duplicate):
    """The host already belongs to a different aggregate."""
    message = _("Host %(host)s already member of another aggregate.")
|
||||
|
||||
|
||||
class AggregateHostExists(Duplicate):
    """The host is already a member of this very aggregate."""
    message = _("Aggregate %(aggregate_id)s already has host %(host)s.")
|
||||
|
||||
@@ -22,6 +22,7 @@ import datetime
|
||||
|
||||
from nova import test
|
||||
from nova import context
|
||||
from nova import exception
|
||||
from nova import db
|
||||
from nova import flags
|
||||
from nova import utils
|
||||
@@ -257,3 +258,266 @@ class DbApiTestCase(test.TestCase):
|
||||
instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)
|
||||
expected = {uuids[0]: [], uuids[1]: []}
|
||||
self.assertEqual(expected, instance_faults)
|
||||
|
||||
|
||||
def _get_fake_aggr_values():
|
||||
return {'name': 'fake_aggregate',
|
||||
'availability_zone': 'fake_avail_zone', }
|
||||
|
||||
|
||||
def _get_fake_aggr_metadata():
|
||||
return {'fake_key1': 'fake_value1',
|
||||
'fake_key2': 'fake_value2'}
|
||||
|
||||
|
||||
def _get_fake_aggr_hosts():
|
||||
return ['foo.openstack.org']
|
||||
|
||||
|
||||
def _create_aggregate(context=context.get_admin_context(),
                      values=_get_fake_aggr_values(),
                      metadata=_get_fake_aggr_metadata()):
    """Create a test aggregate via the DB API.

    NOTE(review): all three defaults are evaluated once at import time,
    so callers that omit them share one admin context and the same
    dicts across calls — safe only while nothing mutates them; confirm.
    """
    return db.aggregate_create(context, values, metadata)
|
||||
|
||||
|
||||
def _create_aggregate_with_hosts(context=context.get_admin_context(),
                                 values=_get_fake_aggr_values(),
                                 metadata=_get_fake_aggr_metadata(),
                                 hosts=_get_fake_aggr_hosts()):
    """Create a test aggregate and add each of `hosts` to it.

    NOTE(review): like _create_aggregate, the defaults are shared,
    import-time objects — confirm no caller mutates them.
    """
    result = _create_aggregate(context=context,
                               values=values, metadata=metadata)
    for host in hosts:
        db.aggregate_host_add(context, result.id, host)
    return result
|
||||
|
||||
|
||||
class AggregateDBApiTestCase(test.TestCase):
    """Tests for the aggregate-related DB API methods."""

    def setUp(self):
        super(AggregateDBApiTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        # Non-admin context, used to verify admin-only enforcement.
        self.context = context.RequestContext(self.user_id, self.project_id)

    def tearDown(self):
        super(AggregateDBApiTestCase, self).tearDown()

    def test_aggregate_create(self):
        """Ensure aggregate can be created with no metadata."""
        result = _create_aggregate(metadata=None)
        self.assertEqual(result['operational_state'], 'building')

    def test_aggregate_create_raise_exist_exc(self):
        """Ensure aggregate names are distinct."""
        _create_aggregate(metadata=None)
        self.assertRaises(exception.AggregateNameExists,
                          _create_aggregate, metadata=None)

    def test_aggregate_get_raise_not_found(self):
        """Ensure AggregateNotFound is raised when getting an aggregate."""
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_get,
                          ctxt, aggregate_id)

    def test_aggregate_metadata_get_raise_not_found(self):
        """Ensure AggregateNotFound is raised when getting metadata."""
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_metadata_get,
                          ctxt, aggregate_id)

    def test_aggregate_create_with_metadata(self):
        """Ensure aggregate can be created with metadata."""
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertDictMatch(expected_metadata, _get_fake_aggr_metadata())

    def test_aggregate_create_low_privi_context(self):
        """Ensure right context is applied when creating aggregate."""
        self.assertRaises(exception.AdminRequired,
                          db.aggregate_create,
                          self.context, _get_fake_aggr_values())

    def test_aggregate_delete_raise_not_found(self):
        """Ensure AggregateNotFound is raised when deleting an aggregate."""
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_delete,
                          ctxt, aggregate_id)

    def test_aggregate_delete(self):
        """Ensure we can delete an aggregate."""
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        db.aggregate_delete(ctxt, result['id'])
        expected = db.aggregate_get_all(ctxt, read_deleted='no')
        self.assertEqual(0, len(expected))

    def test_aggregate_update(self):
        """Ensure an aggregate can be updated."""
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        new_values = _get_fake_aggr_values()
        new_values['availability_zone'] = 'different_avail_zone'
        # Use the created aggregate's id, not a hard-coded one.
        updated = db.aggregate_update(ctxt, result.id, new_values)
        self.assertNotEqual(result.availability_zone,
                            updated.availability_zone)

    def test_aggregate_update_with_metadata(self):
        """Ensure an aggregate can be updated with metadata."""
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        values = _get_fake_aggr_values()
        values['metadata'] = _get_fake_aggr_metadata()
        db.aggregate_update(ctxt, result.id, values)
        expected = db.aggregate_metadata_get(ctxt, result.id)
        self.assertDictMatch(_get_fake_aggr_metadata(), expected)

    def test_aggregate_update_with_existing_metadata(self):
        """Ensure an aggregate can be updated with existing metadata."""
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        values = _get_fake_aggr_values()
        values['metadata'] = _get_fake_aggr_metadata()
        values['metadata']['fake_key1'] = 'foo'
        db.aggregate_update(ctxt, result.id, values)
        expected = db.aggregate_metadata_get(ctxt, result.id)
        self.assertDictMatch(values['metadata'], expected)

    def test_aggregate_update_raise_not_found(self):
        """Ensure AggregateNotFound is raised when updating an aggregate."""
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        new_values = _get_fake_aggr_values()
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_update, ctxt, aggregate_id, new_values)

    def test_aggregate_get_all(self):
        """Ensure we can get all aggregates."""
        ctxt = context.get_admin_context()
        counter = 3
        for c in xrange(counter):
            _create_aggregate(context=ctxt,
                              values={'name': 'fake_aggregate_%d' % c,
                                      'availability_zone': 'fake_avail_zone'},
                              metadata=None)
        results = db.aggregate_get_all(ctxt)
        self.assertEqual(len(results), counter)

    def test_aggregate_get_all_non_deleted(self):
        """Ensure we get only non-deleted aggregates."""
        ctxt = context.get_admin_context()
        add_counter = 5
        remove_counter = 2
        aggregates = []
        for c in xrange(1, add_counter):
            values = {'name': 'fake_aggregate_%d' % c,
                      'availability_zone': 'fake_avail_zone'}
            aggregates.append(_create_aggregate(context=ctxt,
                                                values=values, metadata=None))
        for c in xrange(1, remove_counter):
            db.aggregate_delete(ctxt, aggregates[c - 1].id)
        results = db.aggregate_get_all(ctxt, read_deleted='no')
        self.assertEqual(len(results), add_counter - remove_counter)

    def test_aggregate_metadata_add(self):
        """Ensure we can add metadata for the aggregate."""
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        metadata = _get_fake_aggr_metadata()
        db.aggregate_metadata_add(ctxt, result.id, metadata)
        expected = db.aggregate_metadata_get(ctxt, result.id)
        self.assertDictMatch(metadata, expected)

    def test_aggregate_metadata_update(self):
        """Ensure we can update metadata for the aggregate."""
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        metadata = _get_fake_aggr_metadata()
        key = metadata.keys()[0]
        db.aggregate_metadata_delete(ctxt, result.id, key)
        new_metadata = {key: 'foo'}
        db.aggregate_metadata_add(ctxt, result.id, new_metadata)
        expected = db.aggregate_metadata_get(ctxt, result.id)
        metadata[key] = 'foo'
        self.assertDictMatch(metadata, expected)

    def test_aggregate_metadata_delete(self):
        """Ensure we can delete metadata for the aggregate."""
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        metadata = _get_fake_aggr_metadata()
        db.aggregate_metadata_add(ctxt, result.id, metadata)
        db.aggregate_metadata_delete(ctxt, result.id, metadata.keys()[0])
        expected = db.aggregate_metadata_get(ctxt, result.id)
        del metadata[metadata.keys()[0]]
        self.assertDictMatch(metadata, expected)

    def test_aggregate_metadata_delete_raise_not_found(self):
        """Ensure AggregateMetadataNotFound is raised when deleting."""
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        self.assertRaises(exception.AggregateMetadataNotFound,
                          db.aggregate_metadata_delete,
                          ctxt, result.id, 'foo_key')

    def test_aggregate_host_add(self):
        """Ensure we can add host to the aggregate."""
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        expected = db.aggregate_host_get_all(ctxt, result.id)
        self.assertEqual(_get_fake_aggr_hosts(), expected)

    def test_aggregate_host_add_duplicate_raise_conflict(self):
        """Ensure we cannot add one host to distinct aggregates."""
        ctxt = context.get_admin_context()
        _create_aggregate_with_hosts(context=ctxt, metadata=None)
        self.assertRaises(exception.AggregateHostConflict,
                          _create_aggregate_with_hosts, ctxt,
                          values={'name': 'fake_aggregate2',
                                  'availability_zone': 'fake_avail_zone2', },
                          metadata=None)

    def test_aggregate_host_add_duplicate_raise_exist_exc(self):
        """Ensure we cannot add host to the same aggregate."""
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        self.assertRaises(exception.AggregateHostExists,
                          db.aggregate_host_add,
                          ctxt, result.id, _get_fake_aggr_hosts()[0])

    def test_aggregate_host_add_raise_not_found(self):
        """Ensure AggregateNotFound is raised when adding a host."""
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        host = _get_fake_aggr_hosts()[0]
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_host_add,
                          ctxt, aggregate_id, host)

    def test_aggregate_host_delete(self):
        """Ensure we can delete a host from the aggregate."""
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        db.aggregate_host_delete(ctxt, result.id,
                                 _get_fake_aggr_hosts()[0])
        expected = db.aggregate_host_get_all(ctxt, result.id,
                                             read_deleted='no')
        self.assertEqual(0, len(expected))

    def test_aggregate_host_delete_raise_not_found(self):
        """Ensure AggregateHostNotFound is raised when deleting a host."""
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        self.assertRaises(exception.AggregateHostNotFound,
                          db.aggregate_host_delete,
                          ctxt, result.id, _get_fake_aggr_hosts()[0])
|
||||
|
||||
Reference in New Issue
Block a user