Retry failed instance file deletes

This change adds a new column to the instances table named "cleaned".
This column indicates that the instance files on disk have been
deleted for that instance, assuming that the virt driver you are
using supports that feature (which only libvirt does for now).

Then the flag instance_delete_interval specifies how often to
retry deletes. The default is five minutes. A value of zero will
disable retries.

The number of attempts to delete instance files is tracked in
system metadata. We give up after maximum_instance_delete_attempts
attempts at cleaning up the files.

This change is done in this way because cleaning will become a more
generic concept in the next patch.

DocImpact: the following new flags are added:

 - instance_delete_interval: how often to try to clean up instance
   files

 - maximum_instance_delete_attempts: how many attempts to have before
   giving up

Resolves bug 1040110.

Change-Id: I895869e18f92ed57bd2f8c1683cf9a398d88b680
This commit is contained in:
Michael Still
2013-07-04 12:38:39 +10:00
parent c7cca029f3
commit cb448db52c
15 changed files with 459 additions and 19 deletions
+8
View File
@@ -687,6 +687,10 @@
# (integer value)
#network_allocate_retries=0
# The number of times to attempt to reap an instance's files.
# (integer value)
#maximum_instance_delete_attempts=5
# interval to pull bandwidth usage info (integer value)
#bandwidth_poll_interval=600
@@ -723,6 +727,10 @@
# shelved (integer value)
#shelved_offload_time=0
# Interval in seconds for retrying failed instance file
# deletes (integer value)
#instance_delete_interval=300
# Action to take if a running deleted instance is
# detected.Valid options are 'noop', 'log' and 'reap'. Set to
# 'noop' to disable. (string value)
+44
View File
@@ -148,6 +148,10 @@ interval_opts = [
help='Time in seconds before a shelved instance is eligible '
'for removing from a host. -1 never offload, 0 offload '
'when shelved'),
cfg.IntOpt('instance_delete_interval',
default=300,
help=('Interval in seconds for retrying failed instance file '
'deletes'))
]
timeout_opts = [
@@ -187,11 +191,19 @@ running_deleted_opts = [
"instance should be considered eligible for cleanup."),
]
instance_cleaning_opts = [
cfg.IntOpt('maximum_instance_delete_attempts',
default=5,
help=('The number of times to attempt to reap an instance\'s '
'files.')),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.register_opts(interval_opts)
CONF.register_opts(timeout_opts)
CONF.register_opts(running_deleted_opts)
CONF.register_opts(instance_cleaning_opts)
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('console_topic', 'nova.console.rpcapi')
CONF.import_opt('host', 'nova.netconf')
@@ -4799,3 +4811,35 @@ class ComputeManager(manager.SchedulerDependentManager):
context, filters, columns_to_join=[])
self.driver.manage_image_cache(context, filtered_instances)
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _run_pending_deletes(self, context):
    """Retry any pending instance file deletes.

    Periodically looks up deleted instances on this host whose on-disk
    files have not yet been removed ('cleaned' is False) and asks the
    virt driver to delete their files again.  The number of attempts is
    tracked in the instance's system metadata under 'clean_attempts';
    once CONF.maximum_instance_delete_attempts is reached the instance
    is skipped.
    """
    # An interval of 0 means the operator disabled retries entirely.
    if CONF.instance_delete_interval == 0:
        return

    LOG.debug(_('Cleaning up deleted instances'))
    filters = {'deleted': True,
               'host': CONF.host,
               'cleaned': False}
    attrs = ['info_cache', 'security_groups', 'system_metadata']
    # The instances are already deleted, so the lookup must run with
    # read_deleted='yes' or nothing would be returned.
    with utils.temporary_mutation(context, read_deleted='yes'):
        instances = instance_obj.InstanceList.get_by_filters(
            context, filters, expected_attrs=attrs)
    LOG.debug(_('There are %d instances to clean'), len(instances))

    for instance in instances:
        attempts = int(instance.system_metadata.get('clean_attempts', '0'))
        LOG.debug(_('Instance has had %(attempts)s of %(max)s '
                    'cleanup attempts'),
                  {'attempts': attempts,
                   'max': CONF.maximum_instance_delete_attempts},
                  instance=instance)
        if attempts < CONF.maximum_instance_delete_attempts:
            success = self.driver.delete_instance_files(instance)

            # The attempt counter is bumped whether or not the delete
            # succeeded, so a persistently failing instance eventually
            # stops being retried.
            instance.system_metadata['clean_attempts'] = str(attempts + 1)
            if success:
                instance.cleaned = True
            # Saving a deleted instance also requires read_deleted='yes'.
            with utils.temporary_mutation(context, read_deleted='yes'):
                instance.save(context)
+6
View File
@@ -1777,6 +1777,12 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
query_prefix = query_prefix.\
filter(models.Instance.vm_state != vm_states.SOFT_DELETED)
if 'cleaned' in filters:
if filters.pop('cleaned'):
query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
else:
query_prefix = query_prefix.filter(models.Instance.cleaned == 0)
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
@@ -0,0 +1,62 @@
# Copyright 2013 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, Index, Integer, MetaData, Table
def upgrade(migrate_engine):
    """Add the 'cleaned' column to instances and shadow_instances.

    Also creates a (host, deleted, cleaned) index used by the periodic
    cleanup task, and backfills cleaned=1 for rows that were already
    soft-deleted before this migration ran.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    shadow_instances = Table('shadow_instances', meta, autoload=True)

    cleaned_column = Column('cleaned', Integer, default=0)
    instances.create_column(cleaned_column)
    # A Column object can only be attached to one table, so the shadow
    # table gets a copy.
    shadow_instances.create_column(cleaned_column.copy())

    cleaned_index = Index('instances_host_deleted_cleaned_idx',
                          instances.c.host, instances.c.deleted,
                          instances.c.cleaned)
    cleaned_index.create(migrate_engine)

    # Rows already deleted are treated as cleaned ('deleted' is non-zero
    # for soft-deleted rows).
    migrate_engine.execute(instances.update().
                           where(instances.c.deleted > 0).
                           values(cleaned=1))
    migrate_engine.execute(shadow_instances.update().
                           where(shadow_instances.c.deleted > 0).
                           values(cleaned=1))
def downgrade(migrate_engine):
    """Remove the 'cleaned' column and its index from both tables."""
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    instances.columns.cleaned.drop()

    # NOTE(mikal): some engines remove this index when the column is
    # removed (sqlite for example). Instead of hardcoding that
    # behaviour we just try to remove the index and ignore the failure
    # if there is one.
    try:
        instances = Table('instances', meta, autoload=True)
        cleaned_index = Index('instances_host_deleted_cleaned_idx',
                              instances.c.host, instances.c.deleted)
        cleaned_index.drop(migrate_engine)
    except Exception:
        pass

    shadow_instances = Table('shadow_instances', meta, autoload=True)
    shadow_instances.columns.cleaned.drop()
+6 -1
View File
@@ -167,7 +167,9 @@ class Instance(BASE, NovaBase):
Index('instances_task_state_updated_at_idx',
'task_state', 'updated_at'),
Index('instances_host_node_deleted_idx',
'host', 'node', 'deleted')
'host', 'node', 'deleted'),
Index('instances_host_deleted_cleaned_idx',
'host', 'deleted', 'cleaned'),
)
injected_files = []
@@ -284,6 +286,9 @@ class Instance(BASE, NovaBase):
# the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
cell_name = Column(String(255), nullable=True)
# Records whether an instance has been deleted from disk
cleaned = Column(Integer, default=0)
class InstanceInfoCache(BASE, NovaBase):
"""
+13 -1
View File
@@ -49,7 +49,8 @@ class Instance(base.NovaObject):
# Version 1.3: Added expected_vm_state and admin_state_reset to
# save()
# Version 1.4: Added locked_by and deprecated locked
VERSION = '1.4'
# Version 1.5: Added cleaned
VERSION = '1.5'
fields = {
'id': int,
@@ -133,6 +134,8 @@ class Instance(base.NovaObject):
'fault': obj_utils.nested_object_or_none(
instance_fault.InstanceFault),
'cleaned': bool,
}
obj_extra_fields = ['name']
@@ -221,6 +224,8 @@ class Instance(base.NovaObject):
continue
elif field == 'deleted':
instance.deleted = db_inst['deleted'] == db_inst['id']
elif field == 'cleaned':
instance.cleaned = db_inst['cleaned'] == 1
else:
instance[field] = db_inst[field]
@@ -349,6 +354,13 @@ class Instance(base.NovaObject):
_handle_cell_update_from_api()
return
# Cleaned needs to be turned back into an int here
if 'cleaned' in updates:
if updates['cleaned']:
updates['cleaned'] = 1
else:
updates['cleaned'] = 0
if expected_task_state is not None:
updates['expected_task_state'] = expected_task_state
if expected_vm_state is not None:
+3 -2
View File
@@ -471,7 +471,7 @@ def stub_instance(id, user_id=None, project_id=None, host=None,
limit=None, marker=None,
launched_at=timeutils.utcnow(),
terminated_at=timeutils.utcnow(),
availability_zone='', locked_by=None):
availability_zone='', locked_by=None, cleaned=False):
if user_id is None:
user_id = 'fake_user'
@@ -565,7 +565,8 @@ def stub_instance(id, user_id=None, project_id=None, host=None,
"launched_on": "",
"cell_name": "",
"architecture": "",
"os_type": ""}
"os_type": "",
"cleaned": cleaned}
instance.update(info_cache)
instance['info_cache']['instance_uuid'] = instance['uuid']
+48
View File
@@ -522,3 +522,51 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
for ps in (power_state.NOSTATE, power_state.SHUTDOWN):
self._test_sync_to_stop(power_state.RUNNING, vs, ps,
stop=False)
def test_run_pending_deletes(self):
    """_run_pending_deletes bumps clean_attempts, marks successful file
    deletes as cleaned, and skips instances over the attempt limit.
    """
    self.flags(instance_delete_interval=10)

    class FakeInstance(object):
        # Minimal stand-in for an Instance: just the attributes that
        # _run_pending_deletes reads and writes.
        def __init__(self, uuid, name, smd):
            self.uuid = uuid
            self.name = name
            self.system_metadata = smd
            self.cleaned = False

        def __getitem__(self, name):
            return getattr(self, name)

        def save(self, context):
            pass

    # NOTE: the original test also defined an unused FakeInstanceList
    # class; it has been removed as dead code.
    # 'a' is already over maximum_instance_delete_attempts (default 5),
    # 'b' will succeed, 'c' will fail its first attempt.
    a = FakeInstance('123', 'apple', {'clean_attempts': '100'})
    b = FakeInstance('456', 'orange', {'clean_attempts': '3'})
    c = FakeInstance('789', 'banana', {})

    self.mox.StubOutWithMock(instance_obj.InstanceList,
                             'get_by_filters')
    instance_obj.InstanceList.get_by_filters(
        {'read_deleted': 'yes'},
        {'deleted': True, 'host': 'fake-mini', 'cleaned': False},
        expected_attrs=['info_cache', 'security_groups',
                        'system_metadata']).AndReturn([a, b, c])

    self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files')
    self.compute.driver.delete_instance_files(
        mox.IgnoreArg()).AndReturn(True)
    self.compute.driver.delete_instance_files(
        mox.IgnoreArg()).AndReturn(False)

    self.mox.ReplayAll()

    self.compute._run_pending_deletes({})

    # 'a' was skipped entirely: not cleaned, counter untouched.
    self.assertFalse(a.cleaned)
    self.assertEqual('100', a.system_metadata['clean_attempts'])
    # 'b' succeeded: cleaned, counter bumped.
    self.assertTrue(b.cleaned)
    self.assertEqual('4', b.system_metadata['clean_attempts'])
    # 'c' failed: not cleaned, but the attempt was still counted.
    self.assertFalse(c.cleaned)
    self.assertEqual('1', c.system_metadata['clean_attempts'])
+15
View File
@@ -1535,6 +1535,21 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
'soft_deleted': True})
self._assertEqualListsOfInstances([inst2, inst3], result)
def test_instance_get_all_by_filters_cleaned(self):
    """The 'cleaned' value round-trips through the filters query."""
    inst1 = self.create_instance_with_args()
    inst2 = self.create_instance_with_args(reservation_id='b')
    db.instance_update(self.ctxt, inst1['uuid'], {'cleaned': 1})

    result = db.instance_get_all_by_filters(self.ctxt, {})
    self.assertEqual(2, len(result))
    uuids = [row['uuid'] for row in result]
    self.assertIn(inst1['uuid'], uuids)
    self.assertIn(inst2['uuid'], uuids)
    # Only inst1 was marked cleaned above.
    for row in result:
        if row['uuid'] == inst1['uuid']:
            self.assertTrue(row['cleaned'])
        else:
            self.assertFalse(row['cleaned'])
def test_instance_get_all_by_host_and_node_no_join(self):
instance = self.create_instance_with_args()
result = db.instance_get_all_by_host_and_node(self.ctxt, 'h1', 'n1')
+73
View File
@@ -477,6 +477,31 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
globals(), locals(), ['versioning_api'], -1)
self.migration_api = temp.versioning_api
def assertColumnExists(self, engine, table, column):
    # The reflected table must expose the column.
    self.assertIn(column, db_utils.get_table(engine, table).c)
def assertColumnNotExists(self, engine, table, column):
    # The reflected table must not expose the column.
    self.assertNotIn(column, db_utils.get_table(engine, table).c)
def assertIndexExists(self, engine, table, index):
    # Collect the index names defined on the reflected table.
    reflected = db_utils.get_table(engine, table)
    self.assertIn(index, {idx.name for idx in reflected.indexes})
def assertIndexMembers(self, engine, table, index, members):
    """Assert the named index exists and covers exactly *members*."""
    # Existence is asserted first, so the lookup below cannot miss.
    self.assertIndexExists(engine, table, index)

    reflected = db_utils.get_table(engine, table)
    index_columns = next(idx.columns.keys()
                         for idx in reflected.indexes
                         if idx.name == index)
    self.assertEqual(sorted(members), sorted(index_columns))
def _pre_upgrade_134(self, engine):
now = timeutils.utcnow()
data = [{
@@ -2455,6 +2480,54 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
rows = table.select().execute().fetchall()
self.assertFalse('locked_by' in rows[0])
def _pre_upgrade_206(self, engine):
    """Seed one live and one soft-deleted row into both instance tables."""
    instances = db_utils.get_table(engine, 'instances')
    shadow_instances = db_utils.get_table(engine, 'shadow_instances')

    data = [{'id': 650, 'deleted': 0},
            {'id': 651, 'deleted': 2}]
    for item in data:
        for table in (instances, shadow_instances):
            table.insert().values(item).execute()
    return data
def _check_206(self, engine, data):
    """Verify migration 206: 'cleaned' column, its index and backfill."""
    self.assertColumnExists(engine, 'instances', 'cleaned')
    self.assertColumnExists(engine, 'shadow_instances', 'cleaned')
    self.assertIndexMembers(engine, 'instances',
                            'instances_host_deleted_cleaned_idx',
                            ['host', 'deleted', 'cleaned'])

    instances = db_utils.get_table(engine, 'instances')
    shadow_instances = db_utils.get_table(engine, 'shadow_instances')

    def get_(table, ident):
        return table.select().\
                     where(table.c.id == ident).\
                     execute().\
                     first()

    for table in (instances, shadow_instances):
        # NOTE: the original body ignored the loop variable and queried
        # `instances` on both iterations, so the shadow table was never
        # actually checked; query `table` instead.
        id_1 = get_(table, 650)
        self.assertEqual(0, id_1['deleted'])
        self.assertEqual(0, id_1['cleaned'])

        id_2 = get_(table, 651)
        self.assertEqual(2, id_2['deleted'])
        self.assertEqual(1, id_2['cleaned'])
def _post_downgrade_206(self, engine):
    # The downgrade must remove 'cleaned' from both tables.
    for table in ('instances', 'shadow_instances'):
        self.assertColumnNotExists(engine, table, 'cleaned')
class TestBaremetalMigrations(BaseMigrationTestCase, CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""
+46
View File
@@ -292,6 +292,30 @@ class _TestInstanceObject(object):
# NOTE(danms): Make sure it's actually a bool
self.assertEqual(inst.deleted, True)
def test_get_not_cleaned(self):
    """A NULL 'cleaned' database value is surfaced as False."""
    ctxt = context.get_admin_context()
    fake_inst = dict(self.fake_instance, id=123, cleaned=None)
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(ctxt, fake_inst['uuid'], columns_to_join=[]
                            ).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(ctxt, fake_inst['uuid'])
    # NOTE(mikal): Make sure it's actually a bool
    self.assertEqual(inst.cleaned, False)
def test_get_cleaned(self):
    """A 'cleaned' value of 1 in the database is surfaced as True."""
    ctxt = context.get_admin_context()
    fake_inst = dict(self.fake_instance, id=123, cleaned=1)
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(ctxt, fake_inst['uuid'], columns_to_join=[]
                            ).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = instance.Instance.get_by_uuid(ctxt, fake_inst['uuid'])
    # NOTE(mikal): Make sure it's actually a bool
    self.assertEqual(inst.cleaned, True)
def test_with_info_cache(self):
ctxt = context.get_admin_context()
fake_inst = dict(self.fake_instance)
@@ -444,6 +468,28 @@ class _TestInstanceListObject(object):
self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
self.assertRemotes()
def test_get_all_by_filters_works_for_cleaned(self):
    """InstanceList.get_by_filters passes the 'cleaned' filter through
    to the DB layer and wraps the returned rows in Instance objects.
    """
    fakes = [self.fake_instance(1),
             self.fake_instance(2, updates={'deleted': 2,
                                            'cleaned': None})]
    ctxt = context.get_admin_context()
    ctxt.read_deleted = 'yes'
    self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
    # Only the second fake matches deleted + not-cleaned, so only it is
    # returned by the stubbed DB call.
    db.instance_get_all_by_filters(ctxt,
                                   {'deleted': True, 'cleaned': False},
                                   'uuid', 'asc', limit=None, marker=None,
                                   columns_to_join=['metadata']).AndReturn(
                                       [fakes[1]])
    self.mox.ReplayAll()
    inst_list = instance.InstanceList.get_by_filters(
        ctxt, {'deleted': True, 'cleaned': False}, 'uuid', 'asc',
        expected_attrs=['metadata'])
    self.assertEqual(1, len(inst_list))
    self.assertTrue(isinstance(inst_list.objects[0], instance.Instance))
    self.assertEqual(inst_list.objects[0].uuid, fakes[1]['uuid'])
    self.assertRemotes()
def test_get_by_host(self):
fakes = [self.fake_instance(1),
self.fake_instance(2)]
+83 -3
View File
@@ -39,6 +39,7 @@ from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.objects import instance as instance_obj
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -3149,21 +3150,31 @@ class LibvirtConnTestCase(test.TestCase):
def fake_lookup_by_name(instance_name):
raise exception.InstanceNotFound(instance_id=instance_name)
def fake_delete_instance_files(instance):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, '_delete_instance_files',
fake_delete_instance_files)
instance = db.instance_create(self.context, self.test_instance)
conn.destroy(instance, {})
def test_destroy_removes_disk(self):
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
instance = {"name": "instancename", "id": "42",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64",
"cleaned": 0, 'info_cache': None, 'security_groups': None}
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_undefine_domain')
libvirt_driver.LibvirtDriver._undefine_domain(instance)
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
columns_to_join=[]).AndReturn(instance)
self.mox.StubOutWithMock(shutil, "rmtree")
shutil.rmtree(os.path.join(CONF.instances_path, instance['name']))
shutil.rmtree(os.path.join(CONF.instances_path,
'instance-%08x' % int(instance['id'])))
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_cleanup_lvm')
libvirt_driver.LibvirtDriver._cleanup_lvm(instance)
@@ -3182,6 +3193,13 @@ class LibvirtConnTestCase(test.TestCase):
def fake_unfilter_instance(instance, network_info):
pass
def fake_obj_load_attr(self, attrname):
if not hasattr(self, attrname):
self[attrname] = {}
def fake_save(self, context):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_destroy', fake_destroy)
@@ -3189,6 +3207,12 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(conn.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
self.stubs.Set(instance_obj.Instance, 'fields',
{'id': int, 'uuid': str, 'cleaned': int})
self.stubs.Set(instance_obj.Instance, 'obj_load_attr',
fake_obj_load_attr)
self.stubs.Set(instance_obj.Instance, 'save', fake_save)
conn.destroy(instance, [])
def test_destroy_not_removes_disk(self):
@@ -3223,6 +3247,41 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
conn.destroy(instance, [], None, False)
def test_delete_instance_files(self):
    """delete_instance_files returns False while the instance directory
    survives the rmtree, and True once it is gone.
    """
    instance = {"name": "instancename", "id": "42",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64",
                "cleaned": 0, 'info_cache': None, 'security_groups': None}

    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(shutil, "rmtree")

    db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
                            columns_to_join=[]).AndReturn(instance)
    # NOTE(review): the extra os.path.exists expectations around each
    # rmtree presumably cover libvirt_utils.get_instance_path's own
    # existence check as well as the pre/post checks inside
    # delete_instance_files -- confirm against the driver code.
    # First call: the directory still exists afterwards -> False.
    os.path.exists(mox.IgnoreArg()).AndReturn(False)
    os.path.exists(mox.IgnoreArg()).AndReturn(True)
    shutil.rmtree(os.path.join(CONF.instances_path, instance['uuid']))
    os.path.exists(mox.IgnoreArg()).AndReturn(True)
    # Second call: the directory is gone afterwards -> True.
    os.path.exists(mox.IgnoreArg()).AndReturn(False)
    os.path.exists(mox.IgnoreArg()).AndReturn(True)
    shutil.rmtree(os.path.join(CONF.instances_path, instance['uuid']))
    os.path.exists(mox.IgnoreArg()).AndReturn(False)
    self.mox.ReplayAll()

    def fake_obj_load_attr(self, attrname):
        # Avoid any real lazy-loading on the stripped-down Instance.
        if not hasattr(self, attrname):
            self[attrname] = {}

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(instance_obj.Instance, 'fields',
                   {'id': int, 'uuid': str, 'cleaned': int})
    self.stubs.Set(instance_obj.Instance, 'obj_load_attr',
                   fake_obj_load_attr)

    inst_obj = instance_obj.Instance.get_by_uuid(None, instance['uuid'])
    self.assertFalse(conn.delete_instance_files(inst_obj))
    self.assertTrue(conn.delete_instance_files(inst_obj))
def test_reboot_different_ids(self):
class FakeLoopingCall:
def start(self, *a, **k):
@@ -3348,9 +3407,15 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
def fake_delete_instance_files(instance):
return None
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
self.stubs.Set(conn, '_delete_instance_files',
fake_delete_instance_files)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
@@ -3370,9 +3435,14 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
def fake_delete_instance_files(instance):
return None
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
self.stubs.Set(conn, '_delete_instance_files',
fake_delete_instance_files)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
@@ -3394,9 +3464,14 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
def fake_delete_instance_files(instance):
return None
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
self.stubs.Set(conn, '_delete_instance_files',
fake_delete_instance_files)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
@@ -3417,9 +3492,14 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
def fake_delete_instance_files(instance):
return None
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
self.stubs.Set(conn, '_delete_instance_files',
fake_delete_instance_files)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
+7
View File
@@ -110,6 +110,9 @@ class _FakeDriverBackendTestCase(object):
block_device_info=None):
return '[]'
def fake_delete_instance_files(_self, _instance):
pass
self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
'get_instance_disk_info',
fake_get_instance_disk_info)
@@ -117,6 +120,10 @@ class _FakeDriverBackendTestCase(object):
self.stubs.Set(nova.virt.libvirt.driver.disk,
'extend', fake_extend)
self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
'_delete_instance_files',
fake_delete_instance_files)
# Like the existing fakelibvirt.migrateToURI, do nothing,
# but don't fail for these tests.
self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain,
+7
View File
@@ -974,6 +974,13 @@ class ComputeDriver(object):
LOG.error(_("Exception dispatching event %(event)s: %(ex)s"),
{'event': event, 'ex': ex})
def delete_instance_files(self, instance):
    """Delete any lingering instance files for an instance.

    The base implementation simply reports success, so drivers that do
    not manage on-disk instance files allow the instance to be marked
    as cleaned immediately; drivers that do (e.g. libvirt) override
    this.

    :returns: True if the instance was deleted from disk, False otherwise.
    """
    return True
def load_compute_driver(virtapi, compute_driver=None):
"""Load a compute driver module.
+38 -12
View File
@@ -71,6 +71,7 @@ from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.objects import instance as instance_obj
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
@@ -912,18 +913,7 @@ class LibvirtDriver(driver.ComputeDriver):
disk_dev)
if destroy_disks:
target = libvirt_utils.get_instance_path(instance)
LOG.info(_('Deleting instance files %s'), target,
instance=instance)
if os.path.exists(target):
# If we fail to get rid of the directory
# tree, this shouldn't block deletion of
# the instance as whole.
try:
shutil.rmtree(target)
except OSError as e:
LOG.error(_('Failed to cleanup directory %(target)s: '
'%(e)s'), {'target': target, 'e': e})
self._delete_instance_files(instance)
#NOTE(bfilippov): destroy all LVM disks for this instance
self._cleanup_lvm(instance)
@@ -4065,6 +4055,42 @@ class LibvirtDriver(driver.ComputeDriver):
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
def _delete_instance_files(self, instance):
    # NOTE(mikal): a shim to handle this file not using instance objects
    # everywhere. Remove this when that conversion happens.
    context = nova_context.get_admin_context()
    inst_obj = instance_obj.Instance.get_by_uuid(context, instance['uuid'])

    # NOTE(mikal): this code should be pushed up a layer when this shim is
    # removed.
    attempts = int(inst_obj.system_metadata.get('clean_attempts', '0'))
    success = self.delete_instance_files(inst_obj)
    # The attempt is counted whether or not it succeeded, mirroring the
    # compute manager's retry bookkeeping.
    inst_obj.system_metadata['clean_attempts'] = str(attempts + 1)
    if success:
        inst_obj.cleaned = True
    inst_obj.save(context)
def delete_instance_files(self, instance):
    """Best-effort removal of the instance's directory on disk.

    Returns True when the directory is gone afterwards, False when it
    still exists (i.e. the delete failed and should be retried later).
    """
    target = libvirt_utils.get_instance_path(instance)
    if os.path.exists(target):
        LOG.info(_('Deleting instance files %s'), target,
                 instance=instance)
        try:
            shutil.rmtree(target)
        except OSError as e:
            # A failed rmtree is not fatal here; the final existence
            # check below decides whether the delete worked.
            LOG.error(_('Failed to cleanup directory %(target)s: '
                        '%(e)s'), {'target': target, 'e': e},
                      instance=instance)

    # It is possible that the delete failed, if so don't mark the instance
    # as cleaned.
    if not os.path.exists(target):
        LOG.info(_('Deletion of %s complete'), target, instance=instance)
        return True

    LOG.info(_('Deletion of %s failed'), target, instance=instance)
    return False
class HostState(object):
"""Manages information about the compute node through libvirt."""