Merge "compute: Move detach logic from manager into driver BDM"
+35 -115
@@ -99,7 +99,6 @@ from nova.virt import event as virtevent
|
||||
from nova.virt import storage_users
|
||||
from nova.virt import virtapi
|
||||
from nova.volume import cinder
|
||||
from nova.volume import encryptors
|
||||
|
||||
CONF = nova.conf.CONF
|
||||
|
||||
@@ -4826,39 +4825,35 @@ class ComputeManager(manager.Manager):
         self._notify_about_instance_usage(
             context, instance, "volume.attach", extra_usage_info=info)

-    def _driver_detach_volume(self, context, instance, bdm, connection_info):
-        """Do the actual driver detach using block device mapping."""
+    def _notify_volume_usage_detach(self, context, instance, bdm):
+        if CONF.volume_usage_poll_interval <= 0:
+            return
+
+        vol_stats = []
         mp = bdm.device_name
-        volume_id = bdm.volume_id
-
-        LOG.info(_LI('Detach volume %(volume_id)s from mountpoint %(mp)s'),
-                 {'volume_id': volume_id, 'mp': mp},
-                 instance=instance)
-
+        # Handle bootable volumes which will not contain /dev/
+        if '/dev/' in mp:
+            mp = mp[5:]
         try:
-            if not self.driver.instance_exists(instance):
-                LOG.warning(_LW('Detaching volume from unknown instance'),
-                            instance=instance)
+            vol_stats = self.driver.block_stats(instance, mp)
+        except NotImplementedError:
+            return

-            encryption = encryptors.get_encryption_metadata(
-                context, self.volume_api, volume_id, connection_info)
-
-            self.driver.detach_volume(connection_info,
-                                      instance,
-                                      mp,
-                                      encryption=encryption)
-        except exception.DiskNotFound as err:
-            LOG.warning(_LW('Ignoring DiskNotFound exception while detaching '
-                            'volume %(volume_id)s from %(mp)s: %(err)s'),
-                        {'volume_id': volume_id, 'mp': mp, 'err': err},
-                        instance=instance)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Failed to detach volume %(volume_id)s '
-                                  'from %(mp)s'),
-                              {'volume_id': volume_id, 'mp': mp},
-                              instance=instance)
-                self.volume_api.roll_detaching(context, volume_id)
+        LOG.debug("Updating volume usage cache with totals", instance=instance)
+        rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
+        vol_usage = objects.VolumeUsage(context)
+        vol_usage.volume_id = bdm.volume_id
+        vol_usage.instance_uuid = instance.uuid
+        vol_usage.project_id = instance.project_id
+        vol_usage.user_id = instance.user_id
+        vol_usage.availability_zone = instance.availability_zone
+        vol_usage.curr_reads = rd_req
+        vol_usage.curr_read_bytes = rd_bytes
+        vol_usage.curr_writes = wr_req
+        vol_usage.curr_write_bytes = wr_bytes
+        vol_usage.save(update_totals=True)
+        self.notifier.info(context, 'volume.usage',
+                           compute_utils.usage_volume_info(vol_usage))

     def _detach_volume(self, context, volume_id, instance, destroy_bdm=True,
                        attachment_id=None):
@@ -4872,94 +4867,18 @@ class ComputeManager(manager.Manager):
                             like rebuild, when we don't want to destroy BDM

         """
         bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                 context, volume_id, instance.uuid)
-        if CONF.volume_usage_poll_interval > 0:
-            vol_stats = []
-            mp = bdm.device_name
-            # Handle bootable volumes which will not contain /dev/
-            if '/dev/' in mp:
-                mp = mp[5:]
-            try:
-                vol_stats = self.driver.block_stats(instance, mp)
-            except NotImplementedError:
-                pass
-
-            if vol_stats:
-                LOG.debug("Updating volume usage cache with totals",
-                          instance=instance)
-                rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
-                vol_usage = objects.VolumeUsage(context)
-                vol_usage.volume_id = volume_id
-                vol_usage.instance_uuid = instance.uuid
-                vol_usage.project_id = instance.project_id
-                vol_usage.user_id = instance.user_id
-                vol_usage.availability_zone = instance.availability_zone
-                vol_usage.curr_reads = rd_req
-                vol_usage.curr_read_bytes = rd_bytes
-                vol_usage.curr_writes = wr_req
-                vol_usage.curr_write_bytes = wr_bytes
-                vol_usage.save(update_totals=True)
-                self.notifier.info(context, 'volume.usage',
-                                   compute_utils.usage_volume_info(vol_usage))
+        self._notify_volume_usage_detach(context, instance, bdm)

-        connection_info = jsonutils.loads(bdm.connection_info)
-        connector = self.driver.get_volume_connector(instance)
-        if CONF.host == instance.host:
-            # Only attempt to detach and disconnect from the volume if the
-            # instance is currently associated with the local compute host.
-            self._driver_detach_volume(context, instance, bdm, connection_info)
-        elif not destroy_bdm:
-            LOG.debug("Skipping _driver_detach_volume during remote rebuild.",
-                      instance=instance)
-        elif destroy_bdm:
-            LOG.error(_LE("Unable to call for a driver detach of volume "
-                          "%(vol_id)s due to the instance being registered to "
-                          "the remote host %(inst_host)s."),
-                      {'vol_id': volume_id, 'inst_host': instance.host},
-                      instance=instance)
+        LOG.info(_LI('Detaching volume %(volume_id)s'),
+                 {'volume_id': volume_id}, instance=instance)

-        if connection_info and not destroy_bdm and (
-           connector.get('host') != instance.host):
-            # If the volume is attached to another host (evacuate) then
-            # this connector is for the wrong host. Use the connector that
-            # was stored in connection_info instead (if we have one, and it
-            # is for the expected host).
-            stashed_connector = connection_info.get('connector')
-            if not stashed_connector:
-                # Volume was attached before we began stashing connectors
-                LOG.warning(_LW("Host mismatch detected, but stashed "
-                                "volume connector not found. Instance host is "
-                                "%(ihost)s, but volume connector host is "
-                                "%(chost)s."),
-                            {'ihost': instance.host,
-                             'chost': connector.get('host')})
-            elif stashed_connector.get('host') != instance.host:
-                # Unexpected error. The stashed connector is also not matching
-                # the needed instance host.
-                LOG.error(_LE("Host mismatch detected in stashed volume "
-                              "connector. Will use local volume connector. "
-                              "Instance host is %(ihost)s. Local volume "
-                              "connector host is %(chost)s. Stashed volume "
-                              "connector host is %(schost)s."),
-                          {'ihost': instance.host,
-                           'chost': connector.get('host'),
-                           'schost': stashed_connector.get('host')})
-            else:
-                # Fix found. Use stashed connector.
-                LOG.debug("Host mismatch detected. Found usable stashed "
-                          "volume connector. Instance host is %(ihost)s. "
-                          "Local volume connector host was %(chost)s. "
-                          "Stashed volume connector host is %(schost)s.",
-                          {'ihost': instance.host,
-                           'chost': connector.get('host'),
-                           'schost': stashed_connector.get('host')})
-                connector = stashed_connector
+        driver_bdm = driver_block_device.convert_volume(bdm)
+        driver_bdm.detach(context, instance, self.volume_api, self.driver,
+                          attachment_id=attachment_id, destroy_bdm=destroy_bdm)

-        self.volume_api.terminate_connection(context, volume_id, connector)
-        self.volume_api.detach(context.elevated(), volume_id, instance.uuid,
-                               attachment_id)
         info = dict(volume_id=volume_id)
         self._notify_about_instance_usage(
             context, instance, "volume.detach", extra_usage_info=info)
@@ -5137,9 +5056,10 @@ class ComputeManager(manager.Manager):
         try:
             bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                     context, volume_id, instance.uuid)
-            connection_info = jsonutils.loads(bdm.connection_info)
-            self._driver_detach_volume(context, instance, bdm, connection_info)
             connector = self.driver.get_volume_connector(instance)
+            driver_bdm = driver_block_device.convert_volume(bdm)
+            driver_bdm.driver_detach(context, instance, connector,
+                                     self.volume_api, self.driver)
             self.volume_api.terminate_connection(context, volume_id, connector)
         except exception.NotFound:
             pass
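A minimal, runnable sketch of what the manager-side flow above leaves behind: _detach_volume no longer drives the virt driver or the connector logic itself, it converts the database BDM into a driver BDM and delegates. The method names mirror the diff; everything else below is a hypothetical stand-in so the call shape can be exercised outside nova.

# Hypothetical stand-ins; only the call shape mirrors the diff above.

class FakeDriverBDM(object):
    """Stands in for what driver_block_device.convert_volume() returns."""

    def detach(self, context, instance, volume_api, virt_driver,
               attachment_id=None, destroy_bdm=False):
        # In nova this runs the driver detach, connector selection,
        # terminate_connection and the Cinder-side detach.
        print('detach: attachment_id=%s destroy_bdm=%s'
              % (attachment_id, destroy_bdm))


def convert_volume(bdm):
    # Stands in for nova.virt.block_device.convert_volume().
    return FakeDriverBDM()


def _detach_volume(context, volume_id, instance, volume_api, virt_driver,
                   destroy_bdm=True, attachment_id=None):
    # The remaining manager logic: look up/convert the BDM, then delegate.
    driver_bdm = convert_volume({'volume_id': volume_id})
    driver_bdm.detach(context, instance, volume_api, virt_driver,
                      attachment_id=attachment_id, destroy_bdm=destroy_bdm)


_detach_volume(context=None, volume_id='vol-1', instance=None,
               volume_api=None, virt_driver=None)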
nova/tests/unit/compute/test_compute.py
@@ -86,6 +86,7 @@ from nova.tests.unit import utils as test_utils
 from nova.tests import uuidsentinel as uuids
 from nova import utils
 from nova.virt import block_device as driver_block_device
+from nova.virt.block_device import DriverVolumeBlockDevice as driver_bdm_volume
 from nova.virt import event
 from nova.virt import fake
 from nova.virt import hardware
@@ -421,7 +422,7 @@ class ComputeVolumeTestCase(BaseTestCase):
         instance = self._create_fake_instance_obj()

         with test.nested(
-            mock.patch.object(self.compute, '_driver_detach_volume'),
+            mock.patch.object(driver_bdm_volume, 'driver_detach'),
             mock.patch.object(self.compute.volume_api, 'detach'),
             mock.patch.object(objects.BlockDeviceMapping,
                               'get_by_volume_and_instance'),
@@ -441,7 +442,7 @@ class ComputeVolumeTestCase(BaseTestCase):
         instance = self._create_fake_instance_obj()

         with test.nested(
-            mock.patch.object(self.compute, '_driver_detach_volume'),
+            mock.patch.object(driver_bdm_volume, 'driver_detach'),
             mock.patch.object(self.compute.volume_api, 'detach'),
             mock.patch.object(objects.BlockDeviceMapping,
                               'get_by_volume_and_instance'),
@@ -10148,7 +10149,7 @@ class ComputeAPITestCase(BaseTestCase):
     def test_detach_volume_not_found(self):
         # Ensure that a volume can be detached even when it is removed
         # from an instance but remaining in bdm. See bug #1367964.
-
+        # TODO(lyarwood): Move into ../virt/test_block_device.py
         instance = self._create_fake_instance_obj()
         fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
             {'source_type': 'volume', 'destination_type': 'volume',
@@ -11911,7 +11912,7 @@ class EvacuateHostTestCase(BaseTestCase):

     @mock.patch.object(cinder.API, 'detach')
     @mock.patch.object(compute_manager.ComputeManager, '_prep_block_device')
-    @mock.patch.object(compute_manager.ComputeManager, '_driver_detach_volume')
+    @mock.patch.object(driver_bdm_volume, 'driver_detach')
     def test_rebuild_on_remote_host_with_volumes(self, mock_drv_detach,
                                                  mock_prep, mock_detach):
         """Confirm that the evacuate scenario does not attempt a driver detach
nova/tests/unit/compute/test_compute_mgr.py
@@ -59,6 +59,7 @@ from nova.tests.unit.objects import test_instance_fault
 from nova.tests.unit.objects import test_instance_info_cache
 from nova.tests import uuidsentinel as uuids
 from nova import utils
+from nova.virt.block_device import DriverVolumeBlockDevice as driver_bdm_volume
 from nova.virt import driver as virt_driver
 from nova.virt import event as virtevent
 from nova.virt import fake as fake_driver
@@ -2414,75 +2415,84 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         self.assertEqual(reboot_type, 'HARD')

     @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_and_instance')
-    @mock.patch('nova.compute.manager.ComputeManager._driver_detach_volume')
-    @mock.patch('nova.objects.Instance._from_db_object')
-    def test_remove_volume_connection(self, inst_from_db, detach, bdm_get):
-        bdm = mock.sentinel.bdm
-        bdm.connection_info = jsonutils.dumps({})
-        inst_obj = mock.Mock()
-        inst_obj.uuid = 'uuid'
+    def test_remove_volume_connection(self, bdm_get):
+        inst = mock.Mock()
+        inst.uuid = uuids.instance_uuid
+        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
+            {'source_type': 'volume', 'destination_type': 'volume',
+             'volume_id': uuids.volume_id, 'device_name': '/dev/vdb',
+             'connection_info': '{"test": "test"}'})
+        bdm = objects.BlockDeviceMapping(context=self.context, **fake_bdm)
         bdm_get.return_value = bdm
-        inst_from_db.return_value = inst_obj
-        with mock.patch.object(self.compute, 'volume_api'):
-            self.compute.remove_volume_connection(self.context, 'vol',
-                                                  inst_obj)
-        detach.assert_called_once_with(self.context, inst_obj, bdm, {})
-        bdm_get.assert_called_once_with(self.context, 'vol', 'uuid')
+        with test.nested(
+            mock.patch.object(self.compute, 'volume_api'),
+            mock.patch.object(self.compute, 'driver'),
+            mock.patch.object(driver_bdm_volume, 'driver_detach'),
+        ) as (mock_volume_api, mock_virt_driver, mock_driver_detach):
+            connector = mock.Mock()
+            mock_virt_driver.get_volume_connector.return_value = connector
+            self.compute.remove_volume_connection(self.context,
+                                                  uuids.volume_id, inst)
+
+            bdm_get.assert_called_once_with(self.context, uuids.volume_id,
+                                            uuids.instance_uuid)
+            mock_driver_detach.assert_called_once_with(self.context, inst,
+                connector, mock_volume_api, mock_virt_driver)
+            mock_volume_api.terminate_connection.assert_called_once_with(
+                self.context, uuids.volume_id, connector)

     def test_detach_volume(self):
+        # TODO(lyarwood): Move into ../virt/test_block_device.py
         self._test_detach_volume()

     def test_detach_volume_not_destroy_bdm(self):
+        # TODO(lyarwood): Move into ../virt/test_block_device.py
         self._test_detach_volume(destroy_bdm=False)

     @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_and_instance')
-    @mock.patch('nova.compute.manager.ComputeManager._driver_detach_volume')
+    @mock.patch.object(driver_bdm_volume, 'detach')
     @mock.patch('nova.compute.manager.ComputeManager.'
                 '_notify_about_instance_usage')
     def _test_detach_volume(self, notify_inst_usage, detach,
                             bdm_get, destroy_bdm=True):
+        # TODO(lyarwood): Move into ../virt/test_block_device.py
         volume_id = uuids.volume
         inst_obj = mock.Mock()
         inst_obj.uuid = uuids.instance
         inst_obj.host = CONF.host
         attachment_id = uuids.attachment

-        bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
-        bdm.device_name = 'vdb'
-        bdm.connection_info = jsonutils.dumps({})
+        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
+            {'source_type': 'volume', 'destination_type': 'volume',
+             'volume_id': volume_id, 'device_name': '/dev/vdb',
+             'connection_info': '{"test": "test"}'})
+        bdm = objects.BlockDeviceMapping(context=self.context, **fake_bdm)
         bdm_get.return_value = bdm

         detach.return_value = {}
+        with test.nested(
+            mock.patch.object(self.compute, 'volume_api'),
+            mock.patch.object(self.compute, 'driver'),
+            mock.patch.object(bdm, 'destroy'),
+        ) as (volume_api, driver, bdm_destroy):
+            self.compute._detach_volume(self.context, volume_id, inst_obj,
+                                        destroy_bdm=destroy_bdm,
+                                        attachment_id=attachment_id)
+            detach.assert_called_once_with(self.context, inst_obj,
+                self.compute.volume_api, self.compute.driver,
+                attachment_id=attachment_id,
+                destroy_bdm=destroy_bdm)
+            notify_inst_usage.assert_called_once_with(
+                self.context, inst_obj, "volume.detach",
+                extra_usage_info={'volume_id': volume_id})

-        with mock.patch.object(self.compute, 'volume_api') as volume_api:
-            with mock.patch.object(self.compute, 'driver') as driver:
-                connector_sentinel = mock.sentinel.connector
-                driver.get_volume_connector.return_value = connector_sentinel
-
-                self.compute._detach_volume(self.context, volume_id,
-                                            inst_obj,
-                                            destroy_bdm=destroy_bdm,
-                                            attachment_id=attachment_id)
-
-                detach.assert_called_once_with(self.context, inst_obj, bdm, {})
-                driver.get_volume_connector.assert_called_once_with(inst_obj)
-                volume_api.terminate_connection.assert_called_once_with(
-                    self.context, volume_id, connector_sentinel)
-                volume_api.detach.assert_called_once_with(mock.ANY, volume_id,
-                                                          inst_obj.uuid,
-                                                          attachment_id)
-                notify_inst_usage.assert_called_once_with(
-                    self.context, inst_obj, "volume.detach",
-                    extra_usage_info={'volume_id': volume_id}
-                )
-
-                if destroy_bdm:
-                    bdm.destroy.assert_called_once_with()
-                else:
-                    self.assertFalse(bdm.destroy.called)
+            if destroy_bdm:
+                bdm_destroy.assert_called_once_with()
+            else:
+                self.assertFalse(bdm_destroy.called)

     def test_detach_volume_evacuate(self):
         """For evacuate, terminate_connection is called with original host."""
+        # TODO(lyarwood): Move into ../virt/test_block_device.py
         expected_connector = {'host': 'evacuated-host'}
         conn_info_str = '{"connector": {"host": "evacuated-host"}}'
         self._test_detach_volume_evacuate(conn_info_str,
@@ -2497,6 +2507,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         case because nova does not have the info to get the connector for the
         original (evacuated) host.
         """
+        # TODO(lyarwood): Move into ../virt/test_block_device.py
         conn_info_str = '{"foo": "bar"}'  # Has no 'connector'.
         self._test_detach_volume_evacuate(conn_info_str)

@@ -2506,6 +2517,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         For evacuate, if the stashed connector also has the wrong host,
         then log it and stay with the local connector.
         """
+        # TODO(lyarwood): Move into ../virt/test_block_device.py
         conn_info_str = '{"connector": {"host": "other-host"}}'
         self._test_detach_volume_evacuate(conn_info_str)

@@ -2521,37 +2533,45 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                          terminate call (optional). Default is to expect the
                          local connector to be used.
         """
+        # TODO(lyarwood): Move into ../virt/test_block_device.py
         volume_id = 'vol_id'
         instance = fake_instance.fake_instance_obj(self.context,
                                                    host='evacuated-host')
-        bdm = mock.Mock()
+        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
+            {'source_type': 'volume', 'destination_type': 'volume',
+             'volume_id': volume_id, 'device_name': '/dev/vdb',
+             'connection_info': '{"test": "test"}'})
+        bdm = objects.BlockDeviceMapping(context=self.context, **fake_bdm)
         bdm.connection_info = conn_info_str
         bdm_get.return_value = bdm

         local_connector = {'host': 'local-connector-host'}
         expected_connector = local_connector if not expected else expected

-        with mock.patch.object(self.compute, 'volume_api') as volume_api:
-            with mock.patch.object(self.compute, 'driver') as driver:
-                driver.get_volume_connector.return_value = local_connector
+        with test.nested(
+            mock.patch.object(self.compute, 'volume_api'),
+            mock.patch.object(self.compute, 'driver'),
+            mock.patch.object(driver_bdm_volume, 'driver_detach'),
+        ) as (volume_api, driver, driver_detach):
+            driver.get_volume_connector.return_value = local_connector

-                self.compute._detach_volume(self.context,
-                                            volume_id,
-                                            instance,
-                                            destroy_bdm=False)
+            self.compute._detach_volume(self.context,
+                                        volume_id,
+                                        instance,
+                                        destroy_bdm=False)

-                driver._driver_detach_volume.assert_not_called()
-                driver.get_volume_connector.assert_called_once_with(instance)
-                volume_api.terminate_connection.assert_called_once_with(
-                    self.context, volume_id, expected_connector)
-                volume_api.detach.assert_called_once_with(mock.ANY,
-                                                          volume_id,
-                                                          instance.uuid,
-                                                          None)
-                notify_inst_usage.assert_called_once_with(
-                    self.context, instance, "volume.detach",
-                    extra_usage_info={'volume_id': volume_id}
-                )
+            driver_detach.assert_not_called()
+            driver.get_volume_connector.assert_called_once_with(instance)
+            volume_api.terminate_connection.assert_called_once_with(
+                self.context, volume_id, expected_connector)
+            volume_api.detach.assert_called_once_with(mock.ANY,
+                                                      volume_id,
+                                                      instance.uuid,
+                                                      None)
+            notify_inst_usage.assert_called_once_with(
+                self.context, instance, "volume.detach",
+                extra_usage_info={'volume_id': volume_id}
+            )

     def _test_rescue(self, clean_shutdown=True):
         instance = fake_instance.fake_instance_obj(
nova/virt/block_device.py
@@ -153,6 +153,24 @@ class DriverBlockDevice(dict):
         """
         raise NotImplementedError()

+    def detach(self, **kwargs):
+        """Detach the device from an instance and detach in Cinder.
+
+        Note: driver_detach is called as part of this method.
+
+        To be overridden in subclasses with the detaching logic for
+        the type of device the subclass represents.
+        """
+        raise NotImplementedError()
+
+    def driver_detach(self, **kwargs):
+        """Detach the device from an instance (don't detach in Cinder).
+
+        To be overridden in subclasses with the detaching logic for
+        the type of device the subclass represents.
+        """
+        raise NotImplementedError()
+
     def save(self):
         for attr_name, key_name in self._update_on_save.items():
             lookup_name = key_name or attr_name
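To make the contract of the two new hooks concrete, here is a hypothetical minimal subclass (illustrative only; the real volume override is DriverVolumeBlockDevice in the next hunk): driver_detach unplugs the device at the hypervisor and never touches Cinder, while detach is the end-to-end path that calls driver_detach and then tears down the Cinder side.

# Hypothetical subclass sketch of the contract added above; not nova code.

class ExampleVolumeDevice(dict):
    def driver_detach(self, context, instance, volume_api, virt_driver):
        # Hypervisor-side only: unplug the device, leave Cinder alone.
        virt_driver.detach_volume(self['connection_info'], instance,
                                  self['mount_device'])

    def detach(self, context, instance, volume_api, virt_driver,
               attachment_id=None, destroy_bdm=False):
        # End-to-end: driver_detach is called as part of this method,
        # then the Cinder connection and attachment are torn down.
        self.driver_detach(context, instance, volume_api, virt_driver)
        connector = virt_driver.get_volume_connector(instance)
        volume_api.terminate_connection(context, self['volume_id'],
                                        connector)
        volume_api.detach(context, self['volume_id'], instance.uuid,
                          attachment_id)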
@@ -242,6 +260,99 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
                 LOG.info(_LI('preserve multipath_id %s'),
                          connection_info['data']['multipath_id'])

+    def driver_detach(self, context, instance, volume_api, virt_driver):
+        connection_info = self['connection_info']
+        mp = self['mount_device']
+        volume_id = self.volume_id
+
+        LOG.info(_LI('Attempting to driver detach volume %(volume_id)s from '
+                     ' mountpoint %(mp)s'), {'volume_id': volume_id, 'mp': mp},
+                 instance=instance)
+        try:
+            if not virt_driver.instance_exists(instance):
+                LOG.warning(_LW('Detaching volume from unknown instance'),
+                            instance=instance)
+
+            encryption = encryptors.get_encryption_metadata(context,
+                    volume_api, volume_id, connection_info)
+            virt_driver.detach_volume(connection_info, instance, mp,
+                                      encryption=encryption)
+        except exception.DiskNotFound as err:
+            LOG.warning(_LW('Ignoring DiskNotFound exception while '
+                            'detaching volume %(volume_id)s from '
+                            '%(mp)s : %(err)s'),
+                        {'volume_id': volume_id, 'mp': mp,
+                         'err': err}, instance=instance)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE('Failed to detach volume '
+                                  '%(volume_id)s from %(mp)s'),
+                              {'volume_id': volume_id, 'mp': mp},
+                              instance=instance)
+                volume_api.roll_detaching(context, volume_id)
+
+    def detach(self, context, instance, volume_api, virt_driver,
+               attachment_id=None, destroy_bdm=False):
+
+        connector = virt_driver.get_volume_connector(instance)
+        connection_info = self['connection_info']
+        volume_id = self.volume_id
+
+        # Only attempt to detach and disconnect from the volume if the instance
+        # is currently associated with the local compute host.
+        if CONF.host == instance.host:
+            self.driver_detach(context, instance, volume_api, virt_driver)
+        elif not destroy_bdm:
+            LOG.debug("Skipping _driver_detach during remote rebuild.",
+                      instance=instance)
+        elif destroy_bdm:
+            LOG.error(_LE("Unable to call for a driver detach of volume "
+                          "%(vol_id)s due to the instance being "
+                          "registered to the remote host %(inst_host)s."),
+                      {'vol_id': volume_id,
+                       'inst_host': instance.host}, instance=instance)
+
+        if connection_info and not destroy_bdm and (
+           connector.get('host') != instance.host):
+            # If the volume is attached to another host (evacuate) then
+            # this connector is for the wrong host. Use the connector that
+            # was stored in connection_info instead (if we have one, and it
+            # is for the expected host).
+            stashed_connector = connection_info.get('connector')
+            if not stashed_connector:
+                # Volume was attached before we began stashing connectors
+                LOG.warning(_LW("Host mismatch detected, but stashed "
+                                "volume connector not found. Instance host is "
+                                "%(ihost)s, but volume connector host is "
+                                "%(chost)s."),
+                            {'ihost': instance.host,
+                             'chost': connector.get('host')})
+            elif stashed_connector.get('host') != instance.host:
+                # Unexpected error. The stashed connector is also not matching
+                # the needed instance host.
+                LOG.error(_LE("Host mismatch detected in stashed volume "
+                              "connector. Will use local volume connector. "
+                              "Instance host is %(ihost)s. Local volume "
+                              "connector host is %(chost)s. Stashed volume "
+                              "connector host is %(schost)s."),
+                          {'ihost': instance.host,
+                           'chost': connector.get('host'),
+                           'schost': stashed_connector.get('host')})
+            else:
+                # Fix found. Use stashed connector.
+                LOG.debug("Host mismatch detected. Found usable stashed "
+                          "volume connector. Instance host is %(ihost)s. "
+                          "Local volume connector host was %(chost)s. "
+                          "Stashed volume connector host is %(schost)s.",
+                          {'ihost': instance.host,
+                           'chost': connector.get('host'),
+                           'schost': stashed_connector.get('host')})
+                connector = stashed_connector
+
+        volume_api.terminate_connection(context, volume_id, connector)
+        volume_api.detach(context.elevated(), volume_id, instance.uuid,
+                          attachment_id)
+
     @update_db
     def attach(self, context, instance, volume_api, virt_driver,
                do_driver_attach=False, **kwargs):
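The host-mismatch branch in the new detach() is self-contained enough to read as a pure function. A hypothetical extraction (the function name and return convention are mine, not nova's) that makes the three outcomes easy to see:

def choose_connector(local_connector, connection_info, instance_host,
                     destroy_bdm):
    # Hypothetical extraction of the stashed-connector logic above;
    # returns the connector terminate_connection should be given.
    if destroy_bdm or not connection_info:
        return local_connector
    if local_connector.get('host') == instance_host:
        return local_connector
    stashed = connection_info.get('connector')
    if not stashed:
        # Volume was attached before connectors were stashed: warn and
        # fall back to the local connector.
        return local_connector
    if stashed.get('host') != instance_host:
        # Stashed connector is also wrong: log an error, stay local.
        return local_connector
    # Evacuate case: the stashed connector matches the instance host.
    return stashed


# Mirrors the evacuate unit test above: the stashed connector wins.
assert choose_connector(
    {'host': 'local-connector-host'},
    {'connector': {'host': 'evacuated-host'}},
    'evacuated-host',
    destroy_bdm=False) == {'host': 'evacuated-host'}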