Merge "nova-manage: Add 'limits migrate_to_unified_limits'"
@@ -1759,6 +1759,61 @@ for example.
     - The provided image property value is invalid


Limits Commands
===============

limits migrate_to_unified_limits
--------------------------------

.. program:: nova-manage limits migrate_to_unified_limits

.. code-block:: shell

    nova-manage limits migrate_to_unified_limits [--project-id <project-id>]
        [--region-id <region-id>] [--verbose] [--dry-run]

Migrate quota limits from the Nova database to unified limits in Keystone.

This command is useful for operators migrating from legacy quotas to unified
limits. Limits are migrated by reading them from the Nova database and
creating them in Keystone using the Keystone API.

.. versionadded:: 28.0.0 (2023.2 Bobcat)

.. rubric:: Options

.. option:: --project-id <project-id>

    The project ID for which to migrate quota limits.

.. option:: --region-id <region-id>

    The region ID for which to migrate quota limits.

.. option:: --verbose

    Provide verbose output during execution.

.. option:: --dry-run

    Show what limits would be created without actually creating them.

.. rubric:: Return codes

.. list-table::
   :widths: 20 80
   :header-rows: 1

   * - Return code
     - Description
   * - 0
     - Command completed successfully
   * - 1
     - An unexpected error occurred
   * - 2
     - Failed to connect to the database
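
For example, to preview what the command would create for a single project
without making any changes (the project ID shown is a placeholder):

.. code-block:: shell

    nova-manage limits migrate_to_unified_limits \
        --project-id 4e16ae25b7fc4f7c8b5db6aeccf66439 --verbose --dry-run

Running the command again without ``--dry-run`` performs the migration;
limits that already exist in Keystone are skipped.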

See Also
========
@@ -58,6 +58,8 @@ from nova.db.main import api as db
from nova.db import migration
from nova import exception
from nova.i18n import _
from nova.limit import local as local_limit
from nova.limit import placement as placement_limit
from nova.network import constants
from nova.network import neutron as neutron_api
from nova import objects
@@ -70,6 +72,7 @@ from nova.objects import instance_mapping as instance_mapping_obj
from nova.objects import pci_device as pci_device_obj
from nova.objects import quotas as quotas_obj
from nova.objects import virtual_interface as virtual_interface_obj
import nova.quota
from nova import rpc
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
@@ -3367,6 +3370,183 @@ class ImagePropertyCommands:
            return 1


class LimitsCommands():

    def _create_unified_limits(self, ctxt, legacy_defaults, project_id,
                               region_id, output, dry_run):
        return_code = 0

        # Create registered (default) limits first.
        unified_to_legacy_names = dict(
            **local_limit.LEGACY_LIMITS, **placement_limit.LEGACY_LIMITS)

        legacy_to_unified_names = dict(
            zip(unified_to_legacy_names.values(),
                unified_to_legacy_names.keys()))

        # For auth, a section for [keystone] is required in the config:
        #
        # [keystone]
        # region_name = RegionOne
        # user_domain_name = Default
        # password = <password>
        # username = <username>
        # auth_url = http://127.0.0.1/identity
        # auth_type = password
        # system_scope = all
        #
        # The configured user needs 'role:admin and system_scope:all' by
        # default in order to create limits in Keystone.
        keystone_api = utils.get_sdk_adapter('identity')

        # Service ID is required in unified limits APIs.
        service_id = keystone_api.find_service('nova').id

        # Retrieve the existing resource limits from Keystone.
        registered_limits = keystone_api.registered_limits(region_id=region_id)

        unified_defaults = {
            rl.resource_name: rl.default_limit for rl in registered_limits}

        # f-strings don't seem to work well with the _() translation function.
        msg = f'Found default limits in Keystone: {unified_defaults} ...'
        output(_(msg))

        # Determine which resource limits are missing in Keystone so that we
        # can create them.
        output(_('Creating default limits in Keystone ...'))
        for resource, rlimit in legacy_defaults.items():
            resource_name = legacy_to_unified_names[resource]
            if resource_name not in unified_defaults:
                msg = f'Creating default limit: {resource_name} = {rlimit}'
                if region_id:
                    msg += f' in region {region_id}'
                output(_(msg))
                if not dry_run:
                    try:
                        keystone_api.create_registered_limit(
                            resource_name=resource_name,
                            default_limit=rlimit, region_id=region_id,
                            service_id=service_id)
                    except Exception as e:
                        msg = f'Failed to create default limit: {str(e)}'
                        print(_(msg))
                        return_code = 1
            else:
                existing_rlimit = unified_defaults[resource_name]
                msg = (f'A default limit: {resource_name} = {existing_rlimit} '
                       'already exists in Keystone, skipping ...')
                output(_(msg))

        # Create project limits if there are any.
        if not project_id:
            return return_code

        output(_('Reading project limits from the Nova API database ...'))
        legacy_projects = objects.Quotas.get_all_by_project(ctxt, project_id)
        legacy_projects.pop('project_id', None)
        msg = f'Found project limits in the database: {legacy_projects} ...'
        output(_(msg))

        # Retrieve existing limits from Keystone.
        project_limits = keystone_api.limits(
            project_id=project_id, region_id=region_id)
        unified_projects = {
            pl.resource_name: pl.resource_limit for pl in project_limits}
        msg = f'Found project limits in Keystone: {unified_projects} ...'
        output(_(msg))

        output(_('Creating project limits in Keystone ...'))
        for resource, plimit in legacy_projects.items():
            resource_name = legacy_to_unified_names[resource]
            if resource_name not in unified_projects:
                msg = (
                    f'Creating project limit: {resource_name} = {plimit} '
                    f'for project {project_id}')
                if region_id:
                    msg += f' in region {region_id}'
                output(_(msg))
                if not dry_run:
                    try:
                        keystone_api.create_limit(
                            resource_name=resource_name,
                            resource_limit=plimit, project_id=project_id,
                            region_id=region_id, service_id=service_id)
                    except Exception as e:
                        msg = f'Failed to create project limit: {str(e)}'
                        print(_(msg))
                        return_code = 1
            else:
                existing_plimit = unified_projects[resource_name]
                msg = (f'A project limit: {resource_name} = {existing_plimit} '
                       'already exists in Keystone, skipping ...')
                output(_(msg))

        return return_code

    @action_description(
        _("Copy quota limits from the Nova API database to Keystone."))
    @args('--project-id', metavar='<project-id>', dest='project_id',
          help='Project ID for which to migrate quota limits')
    @args('--region-id', metavar='<region-id>', dest='region_id',
          help='Region ID for which to migrate quota limits')
    @args('--verbose', action='store_true', dest='verbose', default=False,
          help='Provide verbose output during execution.')
    @args('--dry-run', action='store_true', dest='dry_run', default=False,
          help='Show what limits would be created without actually '
               'creating them.')
    def migrate_to_unified_limits(self, project_id=None, region_id=None,
                                  verbose=False, dry_run=False):
        """Migrate quota limits from legacy quotas to unified limits.

        Return codes:
        * 0: Command completed successfully.
        * 1: An unexpected error occurred.
        * 2: Failed to connect to the database.
        """
        ctxt = context.get_admin_context()

        output = lambda msg: None
        if verbose:
            output = lambda msg: print(msg)

        output(_('Reading default limits from the Nova API database ...'))

        try:
            # This will look for limits in the 'default' quota class first and
            # then fall back to the [quota] config options.
            legacy_defaults = nova.quota.QUOTAS.get_defaults(ctxt)
        except db_exc.CantStartEngineError:
            print(_('Failed to connect to the database so aborting this '
                    'migration attempt. Please check your config file to make '
                    'sure that [api_database]/connection and '
                    '[database]/connection are set and run this '
                    'command again.'))
            return 2

        # Remove obsolete resource limits.
        for resource in ('fixed_ips', 'floating_ips', 'security_groups',
                         'security_group_rules'):
            if resource in legacy_defaults:
                msg = f'Skipping obsolete limit for {resource} ...'
                output(_(msg))
                legacy_defaults.pop(resource)

        msg = (
            f'Found default limits in the database: {legacy_defaults} ...')
        output(_(msg))

        try:
            return self._create_unified_limits(
                ctxt, legacy_defaults, project_id, region_id, output, dry_run)
        except Exception as e:
            msg = (f'Unexpected error, see nova-manage.log for the full '
                   f'trace: {str(e)}')
            print(_(msg))
            LOG.exception('Unexpected error')
            return 1


CATEGORIES = {
    'api_db': ApiDbCommands,
    'cell_v2': CellV2Commands,
@@ -3375,6 +3555,7 @@ CATEGORIES = {
    'libvirt': LibvirtCommands,
    'volume_attachment': VolumeAttachmentCommands,
    'image_property': ImagePropertyCommands,
    'limits': LimitsCommands,
}
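
For reference, a sketch of the unified-to-legacy name mapping the command
assembles from ``local_limit.LEGACY_LIMITS`` and
``placement_limit.LEGACY_LIMITS``; the pairs below are inferred from the
expected values in the tests further down, not copied from those modules:

.. code-block:: python

    # Unified limit resource name -> legacy quota resource name. VCPU and
    # MEMORY_MB are enforced via placement resource classes; the rest are
    # enforced locally by Nova.
    unified_to_legacy = {
        'servers': 'instances',
        'class:VCPU': 'cores',
        'class:MEMORY_MB': 'ram',
        'server_metadata_items': 'metadata_items',
        'server_injected_files': 'injected_files',
        'server_injected_file_content_bytes': 'injected_file_content_bytes',
        'server_injected_file_path_bytes': 'injected_file_path_length',
        'server_key_pairs': 'key_pairs',
        'server_groups': 'server_groups',
        'server_group_members': 'server_group_members',
    }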
@@ -28,7 +28,7 @@ keystone_group = cfg.OptGroup(
def register_opts(conf):
    conf.register_group(keystone_group)
    confutils.register_ksa_opts(conf, keystone_group.name,
-                               DEFAULT_SERVICE_TYPE, include_auth=False)
+                               DEFAULT_SERVICE_TYPE, include_auth=True)


def list_opts():
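
With ``include_auth=True``, the keystoneauth credential options become
available under the ``[keystone]`` group, which the new command uses to
authenticate. A minimal sketch of that ``nova.conf`` section, mirroring the
comment in ``_create_unified_limits`` above (all values are placeholders):

.. code-block:: ini

    [keystone]
    region_name = RegionOne
    user_domain_name = Default
    username = <username>
    password = <password>
    auth_url = http://127.0.0.1/identity
    auth_type = password
    system_scope = all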
@@ -2032,3 +2032,66 @@ class GreenThreadPoolShutdownWait(fixtures.Fixture):
        self.useFixture(fixtures.MockPatch(
            'futurist.GreenThreadPoolExecutor.shutdown',
            lambda self, wait: real_shutdown(self, wait=True)))


class UnifiedLimitsFixture(fixtures.Fixture):
    def setUp(self):
        super().setUp()
        self.mock_sdk_adapter = mock.Mock()
        real_get_sdk_adapter = utils.get_sdk_adapter

        def fake_get_sdk_adapter(service_type, **kwargs):
            if service_type == 'identity':
                return self.mock_sdk_adapter
            return real_get_sdk_adapter(service_type, **kwargs)

        self.useFixture(fixtures.MockPatch(
            'nova.utils.get_sdk_adapter', fake_get_sdk_adapter))

        self.mock_sdk_adapter.registered_limits.side_effect = (
            self.registered_limits)
        self.mock_sdk_adapter.limits.side_effect = self.limits
        self.mock_sdk_adapter.create_registered_limit.side_effect = (
            self.create_registered_limit)
        self.mock_sdk_adapter.create_limit.side_effect = self.create_limit

        self.registered_limits_list = []
        self.limits_list = []

    def registered_limits(self, region_id=None):
        if region_id:
            return [rl for rl in self.registered_limits_list
                    if rl.region_id == region_id]
        return self.registered_limits_list

    def limits(self, project_id=None, region_id=None):
        limits_list = self.limits_list
        if project_id:
            limits_list = [pl for pl in limits_list
                           if pl.project_id == project_id]
        if region_id:
            limits_list = [pl for pl in limits_list
                           if pl.region_id == region_id]
        return limits_list

    def create_registered_limit(self, **attrs):
        rl = collections.namedtuple(
            'RegisteredLimit',
            ['resource_name', 'default_limit', 'region_id', 'service_id'])
        rl.resource_name = attrs.get('resource_name')
        rl.default_limit = attrs.get('default_limit')
        rl.region_id = attrs.get('region_id')
        rl.service_id = attrs.get('service_id')
        self.registered_limits_list.append(rl)

    def create_limit(self, **attrs):
        pl = collections.namedtuple(
            'Limit',
            ['resource_name', 'resource_limit', 'project_id', 'region_id',
             'service_id'])
        pl.resource_name = attrs.get('resource_name')
        pl.resource_limit = attrs.get('resource_limit')
        pl.project_id = attrs.get('project_id')
        pl.region_id = attrs.get('region_id')
        pl.service_id = attrs.get('service_id')
        self.limits_list.append(pl)
@@ -20,6 +20,7 @@ from unittest import mock
import fixtures
from neutronclient.common import exceptions as neutron_client_exc
import os_resource_classes as orc
from oslo_db import exception as oslo_db_exc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -2416,3 +2417,203 @@ class TestDBArchiveDeletedRowsMultiCellTaskLog(
        for cell_name in ('cell1', 'cell2'):
            self.assertRegex(
                self.output.getvalue(), r'\| %s.task_log\s+\| 2' % cell_name)

class TestNovaManageLimits(test.TestCase):

    def setUp(self):
        super().setUp()
        self.ctxt = context.get_admin_context()
        self.cli = manage.LimitsCommands()
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
        self.ul_api = self.useFixture(nova_fixtures.UnifiedLimitsFixture())

    @mock.patch('nova.quota.QUOTAS.get_defaults')
    def test_migrate_to_unified_limits_no_db_access(self, mock_get_defaults):
        mock_get_defaults.side_effect = oslo_db_exc.CantStartEngineError()
        return_code = self.cli.migrate_to_unified_limits(verbose=True)
        self.assertEqual(2, return_code)

    @mock.patch('nova.utils.get_sdk_adapter')
    def test_migrate_to_unified_limits_unexpected_error(self, mock_sdk):
        # Simulate an error creating limits.
        mock_sdk.return_value.create_registered_limit.side_effect = (
            test.TestingException('oops!'))
        mock_sdk.return_value.create_limit.side_effect = (
            test.TestingException('oops!'))

        # Create a couple of project limits.
        objects.Quotas.create_limit(self.ctxt, uuids.project, 'ram', 8192)
        objects.Quotas.create_limit(self.ctxt, uuids.project, 'instances', 25)

        return_code = self.cli.migrate_to_unified_limits(
            project_id=uuids.project, verbose=True)
        self.assertEqual(1, return_code)

        # Verify that limit creation was still attempted for the other
        # resources after the unexpected error.
        #
        # There are 10 default limit values in the config options: instances,
        # cores, ram, metadata_items, injected_files,
        # injected_file_content_bytes, injected_file_path_length, key_pairs,
        # server_groups, and server_group_members.
        self.assertEqual(
            10, mock_sdk.return_value.create_registered_limit.call_count)

        self.assertEqual(2, mock_sdk.return_value.create_limit.call_count)
    def test_migrate_to_unified_limits_already_exists(self):
        # Create a couple of unified limits to already exist.
        self.ul_api.create_registered_limit(
            resource_name='servers', default_limit=8)
        self.ul_api.create_limit(
            resource_name='class:VCPU', resource_limit=6,
            project_id=uuids.project)

        # Create a couple of project limits.
        objects.Quotas.create_limit(self.ctxt, uuids.project, 'cores', 10)
        objects.Quotas.create_limit(self.ctxt, uuids.project, 'instances', 25)

        self.cli.migrate_to_unified_limits(
            project_id=uuids.project, verbose=True)

        # There are 10 default limit values in the config options, so because
        # a limit for 'servers' already exists, we should have only created 9.
        mock_sdk = self.ul_api.mock_sdk_adapter
        self.assertEqual(
            9, mock_sdk.create_registered_limit.call_count)

        # There already exists a project limit for 'class:VCPU', so we should
        # have created only 1 project limit.
        self.assertEqual(1, mock_sdk.create_limit.call_count)
    def test_migrate_to_unified_limits(self):
        # Set some defaults using the config options.
        self.flags(instances=5, group='quota')
        self.flags(cores=22, group='quota')
        self.flags(ram=4096, group='quota')
        self.flags(metadata_items=64, group='quota')
        self.flags(injected_files=3, group='quota')
        self.flags(injected_file_content_bytes=9 * 1024, group='quota')
        self.flags(injected_file_path_length=250, group='quota')
        self.flags(key_pairs=50, group='quota')
        self.flags(server_groups=7, group='quota')
        self.flags(server_group_members=12, group='quota')
        # Create a couple of defaults via the 'default' quota class. These
        # take precedence over the config option values.
        objects.Quotas.create_class(self.ctxt, 'default', 'cores', 10)
        objects.Quotas.create_class(self.ctxt, 'default', 'key_pairs', 75)
        # Create obsolete limits which should not be migrated to unified
        # limits.
        objects.Quotas.create_class(self.ctxt, 'default', 'fixed_ips', 8)
        objects.Quotas.create_class(self.ctxt, 'default', 'floating_ips', 6)
        objects.Quotas.create_class(self.ctxt, 'default', 'security_groups', 4)
        objects.Quotas.create_class(
            self.ctxt, 'default', 'security_group_rules', 14)
        # Create a couple of project limits.
        objects.Quotas.create_limit(self.ctxt, uuids.project, 'ram', 8192)
        objects.Quotas.create_limit(self.ctxt, uuids.project, 'instances', 25)

        # Verify there are no unified limits yet.
        registered_limits = self.ul_api.registered_limits()
        self.assertEqual(0, len(registered_limits))
        limits = self.ul_api.limits(project_id=uuids.project)
        self.assertEqual(0, len(limits))

        # Verify that --dry-run works to not actually create limits.
        self.cli.migrate_to_unified_limits(dry_run=True)

        # There should still be no unified limits yet.
        registered_limits = self.ul_api.registered_limits()
        self.assertEqual(0, len(registered_limits))
        limits = self.ul_api.limits(project_id=uuids.project)
        self.assertEqual(0, len(limits))

        # Migrate the limits.
        self.cli.migrate_to_unified_limits(
            project_id=uuids.project, verbose=True)

        # There should be 10 registered (default) limits now.
        expected_registered_limits = {
            'servers': 5,
            'class:VCPU': 10,
            'class:MEMORY_MB': 4096,
            'server_metadata_items': 64,
            'server_injected_files': 3,
            'server_injected_file_content_bytes': 9 * 1024,
            'server_injected_file_path_bytes': 250,
            'server_key_pairs': 75,
            'server_groups': 7,
            'server_group_members': 12,
        }

        registered_limits = self.ul_api.registered_limits()
        self.assertEqual(10, len(registered_limits))
        for rl in registered_limits:
            self.assertEqual(
                expected_registered_limits[rl.resource_name], rl.default_limit)

        # And 2 project limits.
        expected_limits = {
            'class:MEMORY_MB': 8192,
            'servers': 25,
        }

        limits = self.ul_api.limits(project_id=uuids.project)
        self.assertEqual(2, len(limits))
        for pl in limits:
            self.assertEqual(
                expected_limits[pl.resource_name], pl.resource_limit)

        # Verify there are no project limits for a different project.
        other_project_limits = self.ul_api.limits(
            project_id=uuids.otherproject)
        self.assertEqual(0, len(other_project_limits))

        # Try migrating limits for a specific region.
        region_registered_limits = self.ul_api.registered_limits(
            region_id=uuids.region)
        self.assertEqual(0, len(region_registered_limits))

        self.cli.migrate_to_unified_limits(
            region_id=uuids.region, verbose=True)

        region_registered_limits = self.ul_api.registered_limits(
            region_id=uuids.region)
        self.assertEqual(10, len(region_registered_limits))
        for rl in region_registered_limits:
            self.assertEqual(
                expected_registered_limits[rl.resource_name], rl.default_limit)

        # Try migrating project limits for that region.
        region_limits = self.ul_api.limits(
            project_id=uuids.project, region_id=uuids.region)
        self.assertEqual(0, len(region_limits))

        self.cli.migrate_to_unified_limits(
            project_id=uuids.project, region_id=uuids.region, verbose=True)

        region_limits = self.ul_api.limits(
            project_id=uuids.project, region_id=uuids.region)
        self.assertEqual(2, len(region_limits))
        for pl in region_limits:
            self.assertEqual(
                expected_limits[pl.resource_name], pl.resource_limit)

        # Verify that omitting --verbose outputs nothing: migrate limits for
        # a different project after clearing stdout. Re-patch sys.stdout so
        # the fresh StringIO actually captures any output.
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
        self.assertEqual('', self.output.getvalue())

        # Create a limit for the other project.
        objects.Quotas.create_limit(self.ctxt, uuids.otherproject, 'ram', 2048)

        self.cli.migrate_to_unified_limits(project_id=uuids.otherproject)

        other_project_limits = self.ul_api.limits(
            project_id=uuids.otherproject)
        self.assertEqual(1, len(other_project_limits))

        # Output should still be empty after migrating.
        self.assertEqual('', self.output.getvalue())
@@ -0,0 +1,7 @@
features:
  - |
    A new command ``nova-manage limits migrate_to_unified_limits`` has been
    added to make migration from the ``nova.quota.DbQuotaDriver`` to the
    ``nova.quota.UnifiedLimitsDriver`` easier. This will enable operators to
    have their existing quota limits copied from the Nova database to Keystone
    automatically.
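
After migrating, opting in to unified limits is a matter of switching quota
drivers. A minimal sketch of the ``nova.conf`` change, assuming the standard
``[quota] driver`` option:

.. code-block:: ini

    [quota]
    driver = nova.quota.UnifiedLimitsDriver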