Detach is broken for multi-attached fs-based volumes
Fixed an issue with detaching multi-attached fs-based volumes. Volume drivers using _HostMountStateManager are a special case. _HostMountStateManager ensures that the compute node only attempts to mount a single mountpoint in use by multiple attachments once, and that it is not unmounted until it is no longer in use by any attachments. So we can skip the multiattach check for volume drivers that are based on LibvirtMountedFileSystemVolumeDriver. Closes-Bug: 1888022 Change-Id: Ia91b63c0676f42ad8a7a0d16e6870bafc2ee7675
This commit is contained in:
@@ -121,6 +121,7 @@ from nova.virt.libvirt.storage import lvm
|
|||||||
from nova.virt.libvirt.storage import rbd_utils
|
from nova.virt.libvirt.storage import rbd_utils
|
||||||
from nova.virt.libvirt import utils as libvirt_utils
|
from nova.virt.libvirt import utils as libvirt_utils
|
||||||
from nova.virt.libvirt import vif as libvirt_vif
|
from nova.virt.libvirt import vif as libvirt_vif
|
||||||
|
from nova.virt.libvirt.volume import fs as fs_drivers
|
||||||
from nova.virt.libvirt.volume import volume as volume_drivers
|
from nova.virt.libvirt.volume import volume as volume_drivers
|
||||||
|
|
||||||
|
|
||||||
@@ -9149,6 +9150,20 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||||||
mock_encryptor.attach_volume.assert_called_once_with(
|
mock_encryptor.attach_volume.assert_called_once_with(
|
||||||
self.context, **encryption)
|
self.context, **encryption)
|
||||||
|
|
||||||
|
def test_should_disconnect_target_multi_attach_filesystem_driver(self):
|
||||||
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
||||||
|
volume_driver = mock.MagicMock(
|
||||||
|
spec=fs_drivers.LibvirtMountedFileSystemVolumeDriver)
|
||||||
|
self.assertTrue(drvr._should_disconnect_target(
|
||||||
|
self.context, None, True, volume_driver, None))
|
||||||
|
|
||||||
|
def test_should_disconnect_target_single_attach_filesystem_driver(self):
|
||||||
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
||||||
|
volume_driver = mock.MagicMock(
|
||||||
|
spec=fs_drivers.LibvirtMountedFileSystemVolumeDriver)
|
||||||
|
self.assertTrue(drvr._should_disconnect_target(
|
||||||
|
self.context, None, False, volume_driver, None))
|
||||||
|
|
||||||
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption')
|
||||||
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor')
|
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor')
|
||||||
def test_disconnect_volume_native_luks_workaround(self,
|
def test_disconnect_volume_native_luks_workaround(self,
|
||||||
|
|||||||
+42
-30
@@ -125,6 +125,7 @@ from nova.virt.libvirt.storage import lvm
|
|||||||
from nova.virt.libvirt.storage import rbd_utils
|
from nova.virt.libvirt.storage import rbd_utils
|
||||||
from nova.virt.libvirt import utils as libvirt_utils
|
from nova.virt.libvirt import utils as libvirt_utils
|
||||||
from nova.virt.libvirt import vif as libvirt_vif
|
from nova.virt.libvirt import vif as libvirt_vif
|
||||||
|
from nova.virt.libvirt.volume import fs
|
||||||
from nova.virt.libvirt.volume import mount
|
from nova.virt.libvirt.volume import mount
|
||||||
from nova.virt.libvirt.volume import remotefs
|
from nova.virt.libvirt.volume import remotefs
|
||||||
from nova.virt import netutils
|
from nova.virt import netutils
|
||||||
@@ -1609,9 +1610,8 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||||||
"volume connection", instance=instance)
|
"volume connection", instance=instance)
|
||||||
vol_driver.disconnect_volume(connection_info, instance)
|
vol_driver.disconnect_volume(connection_info, instance)
|
||||||
|
|
||||||
def _should_disconnect_target(self, context, connection_info, instance):
|
def _should_disconnect_target(self, context, instance, multiattach,
|
||||||
connection_count = 0
|
vol_driver, volume_id):
|
||||||
|
|
||||||
# NOTE(jdg): Multiattach is a special case (not to be confused
|
# NOTE(jdg): Multiattach is a special case (not to be confused
|
||||||
# with shared_targets). With multiattach we may have a single volume
|
# with shared_targets). With multiattach we may have a single volume
|
||||||
# attached multiple times to *this* compute node (ie Server-1 and
|
# attached multiple times to *this* compute node (ie Server-1 and
|
||||||
@@ -1621,41 +1621,53 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||||||
# will indiscriminantly delete the connection for all Server and that's
|
# will indiscriminantly delete the connection for all Server and that's
|
||||||
# no good. So check if it's attached multiple times on this node
|
# no good. So check if it's attached multiple times on this node
|
||||||
# if it is we skip the call to brick to delete the connection.
|
# if it is we skip the call to brick to delete the connection.
|
||||||
if connection_info.get('multiattach', False):
|
if not multiattach:
|
||||||
volume = self._volume_api.get(
|
return True
|
||||||
context,
|
|
||||||
driver_block_device.get_volume_id(connection_info))
|
|
||||||
attachments = volume.get('attachments', {})
|
|
||||||
if len(attachments) > 1:
|
|
||||||
# First we get a list of all Server UUID's associated with
|
|
||||||
# this Host (Compute Node). We're going to use this to
|
|
||||||
# determine if the Volume being detached is also in-use by
|
|
||||||
# another Server on this Host, ie just check to see if more
|
|
||||||
# than one attachment.server_id for this volume is in our
|
|
||||||
# list of Server UUID's for this Host
|
|
||||||
servers_this_host = objects.InstanceList.get_uuids_by_host(
|
|
||||||
context, instance.host)
|
|
||||||
|
|
||||||
# NOTE(jdg): nova.volume.cinder translates the
|
# NOTE(deiter): Volume drivers using _HostMountStateManager are another
|
||||||
# volume['attachments'] response into a dict which includes
|
# special case. _HostMountStateManager ensures that the compute node
|
||||||
# the Server UUID as the key, so we're using that
|
# only attempts to mount a single mountpoint in use by multiple
|
||||||
# here to check against our server_this_host list
|
# attachments once, and that it is not unmounted until it is no longer
|
||||||
for server_id, data in attachments.items():
|
# in use by any attachments. So we can skip the multiattach check for
|
||||||
if server_id in servers_this_host:
|
# volume drivers that are based on LibvirtMountedFileSystemVolumeDriver.
|
||||||
connection_count += 1
|
if isinstance(vol_driver, fs.LibvirtMountedFileSystemVolumeDriver):
|
||||||
|
return True
|
||||||
|
|
||||||
|
connection_count = 0
|
||||||
|
volume = self._volume_api.get(context, volume_id)
|
||||||
|
attachments = volume.get('attachments', {})
|
||||||
|
if len(attachments) > 1:
|
||||||
|
# First we get a list of all Server UUID's associated with
|
||||||
|
# this Host (Compute Node). We're going to use this to
|
||||||
|
# determine if the Volume being detached is also in-use by
|
||||||
|
# another Server on this Host, ie just check to see if more
|
||||||
|
# than one attachment.server_id for this volume is in our
|
||||||
|
# list of Server UUID's for this Host
|
||||||
|
servers_this_host = objects.InstanceList.get_uuids_by_host(
|
||||||
|
context, instance.host)
|
||||||
|
|
||||||
|
# NOTE(jdg): nova.volume.cinder translates the
|
||||||
|
# volume['attachments'] response into a dict which includes
|
||||||
|
# the Server UUID as the key, so we're using that
|
||||||
|
# here to check against our server_this_host list
|
||||||
|
for server_id, data in attachments.items():
|
||||||
|
if server_id in servers_this_host:
|
||||||
|
connection_count += 1
|
||||||
return (False if connection_count > 1 else True)
|
return (False if connection_count > 1 else True)
|
||||||
|
|
||||||
def _disconnect_volume(self, context, connection_info, instance,
|
def _disconnect_volume(self, context, connection_info, instance,
|
||||||
encryption=None):
|
encryption=None):
|
||||||
self._detach_encryptor(context, connection_info, encryption=encryption)
|
self._detach_encryptor(context, connection_info, encryption=encryption)
|
||||||
if self._should_disconnect_target(context, connection_info, instance):
|
vol_driver = self._get_volume_driver(connection_info)
|
||||||
vol_driver = self._get_volume_driver(connection_info)
|
volume_id = driver_block_device.get_volume_id(connection_info)
|
||||||
|
multiattach = connection_info.get('multiattach', False)
|
||||||
|
if self._should_disconnect_target(
|
||||||
|
context, instance, multiattach, vol_driver, volume_id):
|
||||||
vol_driver.disconnect_volume(connection_info, instance)
|
vol_driver.disconnect_volume(connection_info, instance)
|
||||||
else:
|
else:
|
||||||
LOG.info("Detected multiple connections on this host for volume: "
|
LOG.info('Detected multiple connections on this host for '
|
||||||
"%s, skipping target disconnect.",
|
'volume: %(volume)s, skipping target disconnect.',
|
||||||
driver_block_device.get_volume_id(connection_info),
|
{'volume': volume_id})
|
||||||
instance=instance)
|
|
||||||
|
|
||||||
def _extend_volume(self, connection_info, instance, requested_size):
|
def _extend_volume(self, connection_info, instance, requested_size):
|
||||||
vol_driver = self._get_volume_driver(connection_info)
|
vol_driver = self._get_volume_driver(connection_info)
|
||||||
|
|||||||
@@ -0,0 +1,6 @@
|
|||||||
|
---
|
||||||
|
fixes:
|
||||||
|
- |
|
||||||
|
`Bug #1888022 <https://launchpad.net/bugs/1888022>`_:
|
||||||
|
An issue that prevented detach of multi-attached fs-based volumes
|
||||||
|
is resolved.
|
||||||
Reference in New Issue
Block a user