xenapi: ensure vdi is not too big when resizing down

Fix for bug 1155066

This change adds rollback handling to the resize-down code path in the
xenapi driver, and reports a clearer error message when the disk is too
big to resize down.

On a successful rollback, the error is reported to the user via the
instance actions, rather than leaving the server in the ERROR state.

The user is then able to free up some disk space such that
the resize can work correctly.
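
In outline, the flow introduced by this change looks like the sketch
below (an illustrative, simplified version with hypothetical names, not
the real manager code; see the diff for the actual implementation):

    # Sketch: the driver undoes its own changes and raises
    # InstanceFaultRollback; the manager rolls back quotas, puts the
    # instance back to ACTIVE and re-raises the wrapped error.
    class InstanceFaultRollback(Exception):
        def __init__(self, inner_exception=None):
            super(InstanceFaultRollback, self).__init__(str(inner_exception))
            self.inner_exception = inner_exception

    def handle_resize(driver_call, quota_rollback, set_vm_state):
        try:
            driver_call()
        except InstanceFaultRollback as error:
            quota_rollback()              # give back the reservation
            set_vm_state('active')        # not ERROR
            raise error.inner_exception   # surfaces in the instance actions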

Change-Id: Ibad568ab3cfb9caaf4fe002572c8cda973d501a7
John Garbutt
2013-03-14 15:41:47 +00:00
committed by Gerrit Code Review
parent af7823f5e7
commit 72e75dbcea
10 changed files with 385 additions and 76 deletions
@@ -4117,6 +4117,13 @@ class ComputeManager(manager.SchedulerDependentManager):
reservations=None):
try:
yield
except exception.InstanceFaultRollback, error:
self._quota_rollback(context, reservations)
msg = _("Setting instance back to ACTIVE after: %s")
LOG.info(msg % error, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=vm_states.ACTIVE)
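# Re-raise the wrapped error so it is still recorded against the
# instance (fault / instance action), while vm_state stays ACTIVE.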
raise error.inner_exception
except Exception, error:
with excutils.save_and_reraise_exception():
self._quota_rollback(context, reservations)
@@ -1196,3 +1196,10 @@ class BuildAbortException(NovaException):
class RescheduledException(NovaException):
message = _("Build of instance %(instance_uuid)s was re-scheduled: "
"%(reason)s")
class InstanceFaultRollback(NovaException):
def __init__(self, inner_exception=None):
message = _("Instance rollback performed due to: %s")
self.inner_exception = inner_exception
super(InstanceFaultRollback, self).__init__(message % inner_exception)
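A hedged usage sketch of InstanceFaultRollback (illustrative helper
names, not code from this change): driver code performs its own cleanup
and then wraps the original error so the compute manager can restore the
instance:

    from nova import exception

    def run_with_rollback(work, undo):
        # 'work' and 'undo' stand in for the real driver steps and their
        # cleanup; the xenapi resize-down path later in this diff uses an
        # UndoManager for the same purpose.
        try:
            return work()
        except Exception as error:
            undo()
            raise exception.InstanceFaultRollback(error)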
@@ -3089,6 +3089,43 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_resize_instance_driver_rollback(self):
# Ensure instance status set to Running after rollback.
def throw_up(*args, **kwargs):
raise exception.InstanceFaultRollback(test.TestingException())
self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
throw_up)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_type = flavors.get_default_instance_type()
reservations = self._ensure_quota_reservations_rolledback()
self.compute.run_instance(self.context, instance=instance)
new_instance = db.instance_update(self.context, instance['uuid'],
{'host': 'foo'})
new_instance = jsonutils.to_primitive(new_instance)
self.compute.prep_resize(self.context, instance=new_instance,
instance_type=instance_type, image={},
reservations=reservations)
migration_ref = db.migration_get_by_instance_and_status(
self.context.elevated(), new_instance['uuid'], 'pre-migrating')
db.instance_update(self.context, new_instance['uuid'],
{"task_state": task_states.RESIZE_PREP})
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=new_instance,
migration=migration_ref, image={},
reservations=reservations,
instance_type=jsonutils.to_primitive(instance_type))
instance = db.instance_get_by_uuid(self.context, new_instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], None)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_resize_instance(self):
# Ensure instance can be migrated/resized.
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -1513,6 +1513,122 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.context, instance,
'127.0.0.1', instance_type, None)
def test_migrate_rollback_when_resize_down_fs_fails(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
virtapi = vmops._virtapi
self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
self.mox.StubOutWithMock(vm_utils, 'resize_disk')
self.mox.StubOutWithMock(vmops, '_migrate_vhd')
self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')
self.mox.StubOutWithMock(virtapi, 'instance_update')
instance = {'auto_disk_config': True, 'uuid': 'uuid'}
vm_ref = "vm_ref"
dest = "dest"
instance_type = "type"
sr_path = "sr_path"
virtapi.instance_update(self.context, 'uuid', {'progress': 20.0})
vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
vmops._apply_orig_vm_name_label(instance, vm_ref)
old_vdi_ref = "old_ref"
vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
(old_vdi_ref, None))
virtapi.instance_update(self.context, 'uuid', {'progress': 40.0})
new_vdi_ref = "new_ref"
new_vdi_uuid = "new_uuid"
vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
instance_type).AndReturn((new_vdi_ref, new_vdi_uuid))
virtapi.instance_update(self.context, 'uuid', {'progress': 60.0})
vmops._migrate_vhd(instance, new_vdi_uuid, dest,
sr_path, 0).AndRaise(
exception.ResizeError(reason="asdf"))
vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
vmops._restore_orig_vm_and_cleanup_orphan(instance, None)
self.mox.ReplayAll()
self.assertRaises(exception.InstanceFaultRollback,
vmops._migrate_disk_resizing_down, self.context,
instance, dest, instance_type, vm_ref, sr_path)
def test_resize_ensure_vm_is_shutdown_cleanly(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_forced(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_fails(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ResizeError,
vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
class XenAPIImageTypeTestCase(test.TestCase):
"""Test ImageType class."""
@@ -278,3 +278,89 @@ class FetchVhdImageTestCase(test.TestCase):
self.context, self.session, self.instance, self.image_id)
self.mox.VerifyAll()
class ResizeHelpersTestCase(test.TestCase):
def test_get_min_sectors(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('resize2fs', '-P', "fakepath",
run_as_root=True).AndReturn(("size is: 42", ""))
self.mox.ReplayAll()
result = vm_utils._get_min_sectors("fakepath")
self.assertEquals(42 * 4096 / 512, result)
def test_repair_filesystem(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('e2fsck', '-f', "-y", "fakepath",
run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn(
("size is: 42", ""))
self.mox.ReplayAll()
vm_utils._repair_filesystem("fakepath")
def _call_tune2fs_remove_journal(self, path):
utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True)
def _call_tune2fs_add_journal(self, path):
utils.execute("tune2fs", "-j", path, run_as_root=True)
def _call_parted(self, path, start, end):
utils.execute('parted', '--script', path, 'rm', '1',
run_as_root=True)
utils.execute('parted', '--script', path, 'mkpart',
'primary', '%ds' % start, '%ds' % end, run_as_root=True)
def test_resize_part_and_fs_down_succeeds(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(vm_utils, "_get_min_sectors")
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
vm_utils._get_min_sectors(partition_path).AndReturn(9)
utils.execute("resize2fs", partition_path, "10s", run_as_root=True)
self._call_parted(dev_path, 0, 9)
self._call_tune2fs_add_journal(partition_path)
self.mox.ReplayAll()
vm_utils._resize_part_and_fs("fake", 0, 20, 10)
def test_resize_part_and_fs_down_fails_disk_too_big(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(vm_utils, "_get_min_sectors")
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
vm_utils._get_min_sectors(partition_path).AndReturn(10)
self.mox.ReplayAll()
self.assertRaises(exception.ResizeError,
vm_utils._resize_part_and_fs, "fake", 0, 20, 10)
def test_resize_part_and_fs_up_succeeds(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
self._call_parted(dev_path, 0, 29)
utils.execute("resize2fs", partition_path, run_as_root=True)
self._call_tune2fs_add_journal(partition_path)
self.mox.ReplayAll()
vm_utils._resize_part_and_fs("fake", 0, 20, 30)
@@ -33,7 +33,7 @@ class VolumeAttachTestCase(test.TestCase):
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
self.mox.StubOutWithMock(volumeops.vm_utils, 'find_vbd_by_number')
self.mox.StubOutWithMock(volumeops.vm_utils, '_is_vm_shutdown')
self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
@@ -49,7 +49,7 @@ class VolumeAttachTestCase(test.TestCase):
volumeops.vm_utils.find_vbd_by_number(
'session', 'vmref', 'devnumber').AndReturn('vbdref')
volumeops.vm_utils._is_vm_shutdown('session', 'vmref').AndReturn(
volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
False)
volumeops.vm_utils.unplug_vbd('session', 'vbdref')
@@ -239,17 +239,8 @@ class XenAPIDriver(driver.ComputeDriver):
"""Transfers the VHD of a running instance to another host, then shuts
off the instance copies over the COW disk"""
# NOTE(vish): Xen currently does not use network info.
rv = self._vmops.migrate_disk_and_power_off(context, instance,
dest, instance_type)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
name_label = self._vmops._get_orig_vm_name_label(instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.detach_volume(connection_info,
name_label, mount_device)
return rv
return self._vmops.migrate_disk_and_power_off(context, instance,
dest, instance_type, block_device_info)
def suspend(self, instance):
"""suspend the specified instance."""
@@ -286,7 +286,7 @@ def destroy_vm(session, instance, vm_ref):
def clean_shutdown_vm(session, instance, vm_ref):
if _is_vm_shutdown(session, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warn(_("VM already halted, skipping shutdown..."),
instance=instance)
return False
@@ -301,7 +301,7 @@ def clean_shutdown_vm(session, instance, vm_ref):
def hard_shutdown_vm(session, instance, vm_ref):
if _is_vm_shutdown(session, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warn(_("VM already halted, skipping shutdown..."),
instance=instance)
return False
@@ -315,7 +315,7 @@ def hard_shutdown_vm(session, instance, vm_ref):
return True
def _is_vm_shutdown(session, vm_ref):
def is_vm_shutdown(session, vm_ref):
vm_rec = session.call_xenapi("VM.get_record", vm_ref)
state = compile_info(vm_rec)['state']
if state == power_state.SHUTDOWN:
@@ -2078,6 +2078,21 @@ def _write_partition(virtual_size, dev):
LOG.debug(_('Writing partition table %s done.'), dev_path)
def _get_min_sectors(partition_path, block_size=4096):
stdout, _err = utils.execute('resize2fs', '-P', partition_path,
run_as_root=True)
min_size_blocks = long(re.sub('[^0-9]', '', stdout))
min_size_bytes = min_size_blocks * block_size
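# Worked example: if resize2fs -P reports a 42-block minimum with a
# 4096-byte block size, the minimum is 42 * 4096 = 172032 bytes,
# i.e. 172032 / 512 = 336 sectors (see the unit test above).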
return min_size_bytes / SECTOR_SIZE
def _repair_filesystem(partition_path):
# Exit Code 1 = File system errors corrected
# 2 = File system errors corrected, system needs a reboot
utils.execute('e2fsck', '-f', '-y', partition_path, run_as_root=True,
check_exit_code=[0, 1, 2])
def _resize_part_and_fs(dev, start, old_sectors, new_sectors):
"""Resize partition and fileystem.
@@ -2091,10 +2106,7 @@ def _resize_part_and_fs(dev, start, old_sectors, new_sectors):
partition_path = utils.make_dev_path(dev, partition=1)
# Replay journal if FS wasn't cleanly unmounted
# Exit Code 1 = File system errors corrected
# 2 = File system errors corrected, system needs a reboot
utils.execute('e2fsck', '-f', '-y', partition_path, run_as_root=True,
check_exit_code=[0, 1, 2])
_repair_filesystem(partition_path)
# Remove ext3 journal (making it ext2)
utils.execute('tune2fs', '-O ^has_journal', partition_path,
@@ -2102,6 +2114,12 @@ def _resize_part_and_fs(dev, start, old_sectors, new_sectors):
if new_sectors < old_sectors:
# Resizing down, resize filesystem before partition resize
min_sectors = _get_min_sectors(partition_path)
if min_sectors >= new_sectors:
reason = _('Resize down not allowed because minimum '
'filesystem sectors %(min_sectors)d is too big '
'for target sectors %(new_sectors)d')
raise exception.ResizeError(reason=(reason % locals()))
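# Example (matching the unit tests above): shrinking from 20 to 10
# sectors is allowed when the filesystem minimum is 9 sectors, and is
# rejected with ResizeError when the minimum is 10 sectors or more.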
utils.execute('resize2fs', partition_path, '%ds' % size,
run_as_root=True)
@@ -209,6 +209,9 @@ class VMOps(object):
return nova_uuids
def confirm_migration(self, migration, instance, network_info):
self._destroy_orig_vm(instance, network_info)
def _destroy_orig_vm(self, instance, network_info):
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info=network_info)
@@ -227,6 +230,9 @@ class VMOps(object):
hotplug=False)
def finish_revert_migration(self, instance, block_device_info=None):
self._restore_orig_vm_and_cleanup_orphan(instance, block_device_info)
def _restore_orig_vm_and_cleanup_orphan(self, instance, block_device_info):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
@@ -797,53 +803,84 @@ class VMOps(object):
self._virtapi.instance_update(context, instance['uuid'],
{'progress': progress})
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
# 1. NOOP since we're not transmitting the base-copy separately
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
def _resize_ensure_vm_is_shutdown(self, instance, vm_ref):
if vm_utils.is_vm_shutdown(self._session, vm_ref):
LOG.debug(_("VM was already shutdown."), instance=instance)
return
vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
vdi_uuid = vm_vdi_rec['uuid']
old_gb = instance['root_gb']
new_gb = instance_type['root_gb']
LOG.debug(_("Resizing down VDI %(vdi_uuid)s from "
"%(old_gb)dGB to %(new_gb)dGB"), locals(),
instance=instance)
# 2. Power down the instance before resizing
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
if not vm_utils.hard_shutdown_vm(self._session, instance, vm_ref):
raise exception.ResizeError(
reason=_("Unable to terminate instance."))
# 3. Copy VDI, resize partition and filesystem, forget VDI,
# truncate VHD
new_ref, new_uuid = vm_utils.resize_disk(self._session,
instance,
vdi_ref,
instance_type)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
if not instance['auto_disk_config']:
reason = _('Resize down not allowed without auto_disk_config')
raise exception.ResizeError(reason=reason)
# 4. Transfer the new VHD
self._migrate_vhd(instance, new_uuid, dest, sr_path, 0)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
step = make_step_decorator(context, instance,
self._virtapi.instance_update)
# Clean up VDI now that it's been copied
vm_utils.destroy_vdi(self._session, new_ref)
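# Each @step-decorated function below advances the instance progress by
# one step; the unit test above expects updates of 20%, 40% and 60%.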
@step
def fake_step_to_match_resizing_up():
pass
@step
def rename_and_power_off_vm(undo_mgr):
self._resize_ensure_vm_is_shutdown(instance, vm_ref)
self._apply_orig_vm_name_label(instance, vm_ref)
def restore_orig_vm():
# Do not need to restore block devices, not yet been removed
self._restore_orig_vm_and_cleanup_orphan(instance, None)
undo_mgr.undo_with(restore_orig_vm)
@step
def create_copy_vdi_and_resize(undo_mgr, old_vdi_ref):
new_vdi_ref, new_vdi_uuid = vm_utils.resize_disk(self._session,
instance, old_vdi_ref, instance_type)
def cleanup_vdi_copy():
vm_utils.destroy_vdi(self._session, new_vdi_ref)
undo_mgr.undo_with(cleanup_vdi_copy)
return new_vdi_ref, new_vdi_uuid
@step
def transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid):
self._migrate_vhd(instance, new_vdi_uuid, dest, sr_path, 0)
# Clean up VDI now that it's been copied
vm_utils.destroy_vdi(self._session, new_vdi_ref)
@step
def fake_step_to_be_executed_by_finish_migration():
pass
undo_mgr = utils.UndoManager()
try:
fake_step_to_match_resizing_up()
rename_and_power_off_vm(undo_mgr)
old_vdi_ref, _ignore = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
new_vdi_ref, new_vdi_uuid = create_copy_vdi_and_resize(
undo_mgr, old_vdi_ref)
transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid)
except Exception, error:
msg = _("_migrate_disk_resizing_down failed. "
"Restoring orig vm due_to: %{exception}.")
LOG.exception(msg, instance=instance)
undo_mgr._rollback()
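# _rollback() runs the registered undo callbacks in reverse order: the
# copied VDI is destroyed first, then the original VM is renamed back
# and powered on.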
raise exception.InstanceFaultRollback(error)
def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref,
sr_path):
self._apply_orig_vm_name_label(instance, vm_ref)
# 1. Create Snapshot
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
@@ -865,10 +902,7 @@ class VMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
# 3. Now power down the instance
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._resize_ensure_vm_is_shutdown(instance, vm_ref)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
@@ -882,8 +916,15 @@ class VMOps(object):
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def _apply_orig_vm_name_label(self, instance, vm_ref):
# NOTE(sirp): in case we're resizing to the same host (for dev
# purposes), apply a suffix to name-label so the two VM records
# extant until a confirm_resize don't collide.
name_label = self._get_orig_vm_name_label(instance)
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type):
instance_type, block_device_info):
"""Copies a VHD from one host machine to another, possibly
resizing filesystem before hand.
@@ -891,23 +932,17 @@ class VMOps(object):
:param dest: the destination host machine.
:param instance_type: instance_type to resize to
"""
vm_ref = self._get_vm_opaque_ref(instance)
sr_path = vm_utils.get_sr_path(self._session)
resize_down = instance['root_gb'] > instance_type['root_gb']
if resize_down and not instance['auto_disk_config']:
reason = _('Resize down not allowed without auto_disk_config')
raise exception.ResizeError(reason=reason)
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
# NOTE(sirp): in case we're resizing to the same host (for dev
# purposes), apply a suffix to name-label so the two VM records
# extant until a confirm_resize don't collide.
name_label = self._get_orig_vm_name_label(instance)
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
vm_ref = self._get_vm_opaque_ref(instance)
sr_path = vm_utils.get_sr_path(self._session)
old_gb = instance['root_gb']
new_gb = instance_type['root_gb']
resize_down = old_gb > new_gb
if resize_down:
self._migrate_disk_resizing_down(
@@ -916,12 +951,24 @@ class VMOps(object):
self._migrate_disk_resizing_up(
context, instance, dest, vm_ref, sr_path)
self._detach_block_devices_from_orig_vm(instance, block_device_info)
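# Volumes are detached from the '-orig' VM only after the disk migration
# has succeeded, which is why the rollback path above can restore the
# original VM without re-attaching anything.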
# NOTE(sirp): disk_info isn't used by the xenapi driver, instead it
# uses a staging-area (/images/instance<uuid>) and sequence-numbered
# VHDs to figure out how to reconstruct the VDI chain after syncing
disk_info = {}
return disk_info
def _detach_block_devices_from_orig_vm(self, instance, block_device_info):
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
name_label = self._get_orig_vm_name_label(instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.detach_volume(connection_info, name_label,
mount_device)
def _resize_instance(self, instance, root_vdi):
"""Resize an instances root disk."""
@@ -141,7 +141,7 @@ class VolumeOps(object):
return
# Unplug VBD if we're NOT shutdown
unplug = not vm_utils._is_vm_shutdown(self._session, vm_ref)
unplug = not vm_utils.is_vm_shutdown(self._session, vm_ref)
self._detach_vbd(vbd_ref, unplug=unplug)
LOG.info(_('Mountpoint %(mountpoint)s detached from instance'
@@ -171,7 +171,7 @@ class VolumeOps(object):
# Generally speaking, detach_all will be called with VM already
# shutdown; however if it's still running, we can still perform the
# operation by unplugging the VBD first.
unplug = not vm_utils._is_vm_shutdown(self._session, vm_ref)
unplug = not vm_utils.is_vm_shutdown(self._session, vm_ref)
vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
for vbd_ref in vbd_refs: