Refactor code for setting up libvirt disk mappings

Currently the libvirt disk bus is fixed at the time the driver
is started. The get_guest_storage_config and _create_image
methods thus use some variables initialized in the libvirt
driver constructor to determine disk bus / dev name mappings.

It will shortly become possible to configure a different disk
bus per instance, which invalidates the current assumptions
in the code. A further complication is that the _create_image
and get_guest_storage_config methods need to duplicate each
other's logic for determining disk mapping.

To simplify the current code and make it more flexible,
introduce a new 'blockinfo.py' module in libvirt, and with
it a 'get_disk_mapping' method & associated helper APIs. This
method is responsible for examining the instance type and
block device info dicts and figuring out the complete list of
disks that will be attached to the guest & their optimal
disk bus + dev name values. This info is returned in a dict
and then passed to _create_image and get_guest_storage_config

Thus the logic for determining disk dev names is now isolated
in one single place, separate from the main driver code, so
it has no need to rely on state in the driver object.

Many, many test cases are added to try to thoroughly validate
the disk mapping code, since there are a huge set of possible
configurations the user may request, making it easy to break
the code accidentally.

Blueprint: libvirt-custom-hardware
Change-Id: I645e69fcc7088674f063f619b2acbbee94d7ba61
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
This commit is contained in:
Daniel P. Berrange
2013-01-15 20:43:48 +00:00
parent cfec3e7658
commit 7be531fe94
7 changed files with 1184 additions and 277 deletions
+126 -52
View File
@@ -55,6 +55,7 @@ from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
@@ -356,9 +357,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, None)
None, disk_info)
self.assertEquals(cfg.acpi, True)
self.assertEquals(cfg.apic, True)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
@@ -401,9 +404,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 2),
None, None)
None, disk_info)
self.assertEquals(cfg.acpi, True)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
@@ -433,14 +438,18 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref, [], None, None,
{'root_device_name': 'dev/vdb'})
block_device_info = {'root_device_name': '/dev/vdb'}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
block_device_info)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, block_device_info)
self.assertEquals(cfg.acpi, False)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, "uml")
self.assertEquals(cfg.os_boot_dev, None)
self.assertEquals(cfg.os_root, 'dev/vdb')
self.assertEquals(cfg.os_root, '/dev/vdb')
self.assertEquals(len(cfg.devices), 3)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -458,7 +467,10 @@ class LibvirtConnTestCase(test.TestCase):
{'connection_info': conn_info, 'mount_device': '/dev/vdc'},
{'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
cfg = conn.get_guest_config(instance_ref, [], None, None, info)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref, info)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, info)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[2].target_dev, 'vdc')
@@ -473,12 +485,13 @@ class LibvirtConnTestCase(test.TestCase):
# make configdrive.enabled_for() return True
instance_ref['config_drive'] = 'ANY_ID'
cfg = conn.get_guest_config(instance_ref, [], None, None)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[2].target_dev,
conn.default_last_device)
self.assertEquals(cfg.devices[2].target_dev, 'vdz')
def test_get_guest_config_with_vnc(self):
self.flags(libvirt_type='kvm',
@@ -489,7 +502,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref, [], None, None)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 5)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -513,7 +528,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref, [], None, None)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -542,7 +559,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref, [], None, None)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -571,7 +590,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref, [], None, None)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -600,7 +621,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
cfg = conn.get_guest_config(instance_ref, [], None, None)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 8)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -629,9 +652,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, None)
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_default_kvm(self):
@@ -647,9 +672,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, None)
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
@@ -662,9 +689,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, None)
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_default_lxc(self):
@@ -674,9 +703,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, None)
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_host_passthrough_new(self):
@@ -690,9 +721,11 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, None)
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-passthrough")
@@ -709,9 +742,11 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, None)
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
@@ -729,9 +764,11 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, None)
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "custom")
@@ -747,11 +784,14 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
self.assertRaises(exception.NovaException,
conn.get_guest_config,
instance_ref,
_fake_network_info(self.stubs, 1),
None, None)
None,
disk_info)
def test_get_guest_cpu_config_host_model_old(self):
def get_lib_version_stub(self):
@@ -781,9 +821,11 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, None)
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
@@ -805,9 +847,11 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, None)
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
@@ -1572,14 +1616,16 @@ class LibvirtConnTestCase(test.TestCase):
conn.attach_volume,
{"driver_volume_type": "badtype"},
{"name": "fake-instance"},
"/dev/fake")
"/dev/sda")
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _fake_network_info(self.stubs, 2)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, instance_data)
xml = conn.to_xml(instance_ref, network_info, None, False)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
interfaces = tree.findall("./devices/interface")
self.assertEquals(len(interfaces), 2)
@@ -1599,7 +1645,9 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(conn.uri, 'lxc:///')
network_info = _fake_network_info(self.stubs, 1)
xml = conn.to_xml(instance_ref, network_info)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
check = [
@@ -1640,7 +1688,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
network_info = _fake_network_info(self.stubs, 1)
xml = conn.to_xml(instance_ref, network_info)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
@@ -1673,8 +1723,10 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
instance_ref, network_info, image_meta)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
@@ -1684,8 +1736,10 @@ class LibvirtConnTestCase(test.TestCase):
# The O_DIRECT availability is cached on first use in
# LibvirtDriver, hence we re-create it here
xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
instance_ref, network_info, image_meta)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
@@ -1697,11 +1751,13 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
instance_ref,
network_info,
image_meta,
block_device_info=block_device_info)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
block_device_info,
image_meta)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta,
block_device_info=block_device_info)
tree = etree.fromstring(xml)
got_disks = tree.findall('./devices/disk')
@@ -1724,8 +1780,10 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
instance_ref, network_info, image_meta)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
self.assertEqual(tree.find('./uuid').text,
instance_ref['uuid'])
@@ -1879,7 +1937,11 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(conn.uri, expected_uri)
network_info = _fake_network_info(self.stubs, 1)
xml = conn.to_xml(instance_ref, network_info, None, rescue)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
rescue=rescue)
xml = conn.to_xml(instance_ref, network_info, disk_info,
rescue=rescue)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
@@ -2216,9 +2278,14 @@ class LibvirtConnTestCase(test.TestCase):
).AndReturn(vol['block_device_mapping'])
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
disk_info = {
'bus': "scsi",
'dev': v['mount_device'].rpartition("/")[2],
'type': "disk"
}
conn.volume_driver_method('connect_volume',
v['connection_info'],
v['mount_device'].rpartition("/")[2])
v['connection_info'],
disk_info)
self.mox.StubOutWithMock(conn, 'plug_vifs')
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
@@ -2244,10 +2311,14 @@ class LibvirtConnTestCase(test.TestCase):
# Creating mocks
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
disk_info = {
'bus': "scsi",
'dev': v['mount_device'].rpartition("/")[2],
'type': "disk"
}
conn.volume_driver_method('connect_volume',
v['connection_info'],
v['mount_device'].
rpartition("/")[2])
disk_info)
self.mox.StubOutWithMock(conn, 'plug_vifs')
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
self.mox.ReplayAll()
@@ -4254,16 +4325,18 @@ class LibvirtDriverTestCase(test.TestCase):
def fake_extend(path, size):
pass
def fake_to_xml(instance, network_info, image_meta=None, rescue=None,
def fake_to_xml(instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
return ""
def fake_plug_vifs(instance, network_info):
pass
def fake_create_image(context, inst, libvirt_xml, suffix='',
disk_images=None, network_info=None,
block_device_info=None):
def fake_create_image(context, inst, libvirt_xml,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None):
pass
def fake_create_domain(xml, instance=None):
@@ -4321,7 +4394,8 @@ class LibvirtDriverTestCase(test.TestCase):
def fake_get_info(instance):
return {'state': power_state.RUNNING}
def fake_to_xml(instance, network_info, image_meta=None, rescue=None,
def fake_to_xml(instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None):
return ""
+396
View File
@@ -0,0 +1,396 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import block_device
from nova import context
from nova import db
from nova import test
import nova.tests.image.fake
from nova.virt.libvirt import blockinfo
class LibvirtBlockInfoTest(test.TestCase):
    """Tests for the libvirt blockinfo disk mapping helpers.

    Exercises blockinfo.get_disk_mapping and its helper APIs across the
    configuration permutations covered here: the default layout, a custom
    root device name, rescue mode, an ISO root image, swap, config drive,
    ephemeral disks and cinder/block-device volumes.
    """

    def setUp(self):
        super(LibvirtBlockInfoTest, self).setUp()

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        nova.tests.image.fake.stub_out_image_service(self.stubs)
        # Minimal instance record shared by all tests below;
        # instance_type_id '5' maps to m1.small in the test fixtures.
        self.test_instance = {
            'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
            'memory_kb': '1024000',
            'basepath': '/some/path',
            'bridge_name': 'br100',
            'vcpus': 2,
            'project_id': 'fake',
            'bridge': 'br101',
            'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
            'root_gb': 10,
            'ephemeral_gb': 20,
            'instance_type_id': '5'}  # m1.small

    def test_volume_in_mapping(self):
        # Verify block_device.volume_in_mapping() reports membership for
        # swap, ephemeral and volume devices, but not the root disk or
        # devices absent from the mapping.
        swap = {'device_name': '/dev/sdb',
                'swap_size': 1}
        ephemerals = [{'num': 0,
                       'virtual_name': 'ephemeral0',
                       'device_name': '/dev/sdc1',
                       'size': 1},
                      {'num': 2,
                       'virtual_name': 'ephemeral2',
                       'device_name': '/dev/sdd',
                       'size': 1}]
        block_device_mapping = [{'mount_device': '/dev/sde',
                                 'device_path': 'fake_device'},
                                {'mount_device': '/dev/sdf',
                                 'device_path': 'fake_device'}]
        block_device_info = {
            'root_device_name': '/dev/sda',
            'swap': swap,
            'ephemerals': ephemerals,
            'block_device_mapping': block_device_mapping}

        def _assert_volume_in_mapping(device_name, true_or_false):
            self.assertEquals(
                block_device.volume_in_mapping(device_name,
                                               block_device_info),
                true_or_false)

        _assert_volume_in_mapping('sda', False)
        _assert_volume_in_mapping('sdb', True)
        _assert_volume_in_mapping('sdc1', True)
        _assert_volume_in_mapping('sdd', True)
        _assert_volume_in_mapping('sde', True)
        _assert_volume_in_mapping('sdf', True)
        _assert_volume_in_mapping('sdg', False)
        _assert_volume_in_mapping('sdh1', False)

    def test_find_disk_dev(self):
        # find_disk_dev_for_disk_bus picks the next free dev name for a
        # bus given the devices already present in the mapping.
        mapping = {
            "disk.local": {
                'dev': 'sda',
                'bus': 'scsi',
                'type': 'disk',
                },
            "disk.swap": {
                'dev': 'sdc',
                'bus': 'scsi',
                'type': 'disk',
                },
            }

        # 'sda' is taken, so 'sdb' is the first free scsi dev name.
        dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi')
        self.assertEqual(dev, 'sdb')

        # last_device=True requests the highest possible dev name.
        dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi',
                                                   last_device=True)
        self.assertEqual(dev, 'sdz')

        # Nothing on the virtio bus yet, so numbering starts at 'vda'.
        dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'virtio')
        self.assertEqual(dev, 'vda')

    def test_get_next_disk_dev(self):
        # get_next_disk_info allocates sequential dev names per bus and
        # returns the full disk info dict for the new device.
        mapping = {}
        mapping['disk.local'] = blockinfo.get_next_disk_info(mapping,
                                                             'virtio')
        self.assertEqual(mapping['disk.local'],
                         {'dev': 'vda', 'bus': 'virtio', 'type': 'disk'})

        mapping['disk.swap'] = blockinfo.get_next_disk_info(mapping,
                                                            'virtio')
        self.assertEqual(mapping['disk.swap'],
                         {'dev': 'vdb', 'bus': 'virtio', 'type': 'disk'})

        # A cdrom requested as a "last device" lands on the final ide
        # slot ('hdd').
        mapping['disk.config'] = blockinfo.get_next_disk_info(mapping,
                                                              'ide',
                                                              'cdrom',
                                                              True)
        self.assertEqual(mapping['disk.config'],
                         {'dev': 'hdd', 'bus': 'ide', 'type': 'cdrom'})

    def test_get_disk_mapping_simple(self):
        # The simplest possible disk mapping setup, all defaults
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide")

        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
            }
        self.assertEqual(mapping, expect)

    def test_get_disk_mapping_simple_rootdev(self):
        # A simple disk mapping setup, but with custom root device name
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)
        block_device_info = {
            'root_device_name': '/dev/sda'
            }

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             block_device_info)

        # NOTE(review): the '/dev/sda' root name switches the root disk
        # onto the scsi bus, while disk.local stays on virtio.
        expect = {
            'disk': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'},
            'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'root': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'}
            }
        self.assertEqual(mapping, expect)

    def test_get_disk_mapping_rescue(self):
        # A simple disk mapping setup, but in rescue mode
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             rescue=True)

        # In rescue mode the rescue image boots first ('vda') and the
        # original root disk is attached after it.
        expect = {
            'disk.rescue': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
            }
        self.assertEqual(mapping, expect)

    def test_get_disk_mapping_simple_iso(self):
        # A simple disk mapping setup, but with a ISO for root device
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)
        image_meta = {'disk_format': 'iso'}

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             None,
                                             image_meta)

        # An ISO root becomes an ide cdrom instead of a virtio disk.
        expect = {
            'disk': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom'},
            'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'root': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom'}
            }
        self.assertEqual(mapping, expect)

    def test_get_disk_mapping_simple_swap(self):
        # A simple disk mapping setup, but with a swap device added
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)
        instance_ref['instance_type']['swap'] = 5

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide")

        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'disk.swap': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
            }
        self.assertEqual(mapping, expect)

    def test_get_disk_mapping_simple_configdrive(self):
        # A simple disk mapping setup, but with configdrive added
        self.flags(force_config_drive=True)

        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)

        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide")

        # The config drive is always placed on the last possible dev
        # name for its bus ('vdz').
        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'disk.config': {'bus': 'virtio', 'dev': 'vdz', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
            }
        self.assertEqual(mapping, expect)

    def test_get_disk_mapping_ephemeral(self):
        # A disk mapping with ephemeral devices
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)
        instance_ref['instance_type']['swap'] = 5

        block_device_info = {
            'ephemerals': [
                {'num': 0, 'virtual_name': 'ephemeral0',
                 'device_name': '/dev/vdb', 'size': 10},
                {'num': 1, 'virtual_name': 'ephemeral1',
                 'device_name': '/dev/vdc', 'size': 10},
                {'num': 2, 'virtual_name': 'ephemeral2',
                 'device_name': '/dev/vdd', 'size': 10},
                ]
            }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             block_device_info)

        # Swap is allocated after the explicitly-named ephemerals.
        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'disk.eph1': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
            'disk.eph2': {'bus': 'virtio', 'dev': 'vdd', 'type': 'disk'},
            'disk.swap': {'bus': 'virtio', 'dev': 'vde', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
            }
        self.assertEqual(mapping, expect)

    def test_get_disk_mapping_custom_swap(self):
        # A disk mapping with a swap device at position vdb. This
        # should cause disk.local to be removed
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)

        block_device_info = {
            'swap': {'device_name': '/dev/vdb',
                     'swap_size': 10},
            }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             block_device_info)

        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'disk.swap': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
            }
        self.assertEqual(mapping, expect)

    def test_get_disk_mapping_blockdev_root(self):
        # A disk mapping with a blockdev replacing the default root
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)

        block_device_info = {
            'block_device_mapping': [
                {'connection_info': "fake",
                 'mount_device': "/dev/vda",
                 'delete_on_termination': True},
                ]
            }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             block_device_info)

        # Volumes are keyed by their mount device path in the mapping.
        expect = {
            '/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
            }
        self.assertEqual(mapping, expect)

    def test_get_disk_mapping_blockdev_eph(self):
        # A disk mapping with a blockdev replacing the ephemeral device
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)

        block_device_info = {
            'block_device_mapping': [
                {'connection_info': "fake",
                 'mount_device': "/dev/vdb",
                 'delete_on_termination': True},
                ]
            }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             block_device_info)

        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
            }
        self.assertEqual(mapping, expect)

    def test_get_disk_mapping_blockdev_many(self):
        # A disk mapping with a blockdev replacing all devices
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)

        block_device_info = {
            'block_device_mapping': [
                {'connection_info': "fake",
                 'mount_device': "/dev/vda",
                 'delete_on_termination': True},
                {'connection_info': "fake",
                 'mount_device': "/dev/vdb",
                 'delete_on_termination': True},
                {'connection_info': "fake",
                 'mount_device': "/dev/vdc",
                 'delete_on_termination': True},
                ]
            }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             block_device_info)

        expect = {
            '/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
            }
        self.assertEqual(mapping, expect)

    def test_get_disk_mapping_complex(self):
        # The strangest possible disk mapping setup
        user_context = context.RequestContext(self.user_id, self.project_id)
        instance_ref = db.instance_create(user_context, self.test_instance)

        block_device_info = {
            'root_device_name': '/dev/vdf',
            'swap': {'device_name': '/dev/vdy',
                     'swap_size': 10},
            'ephemerals': [
                {'num': 0, 'virtual_name': 'ephemeral0',
                 'device_name': '/dev/vdb', 'size': 10},
                {'num': 1, 'virtual_name': 'ephemeral1',
                 'device_name': '/dev/vdc', 'size': 10},
                ],
            'block_device_mapping': [
                {'connection_info': "fake",
                 'mount_device': "/dev/vda",
                 'delete_on_termination': True},
                ]
            }
        mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
                                             "virtio", "ide",
                                             block_device_info)

        expect = {
            'disk': {'bus': 'virtio', 'dev': 'vdf', 'type': 'disk'},
            '/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
            'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
            'disk.eph1': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
            'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
            'root': {'bus': 'virtio', 'dev': 'vdf', 'type': 'disk'}
            }
        self.assertEqual(mapping, expect)
+69 -29
View File
@@ -66,8 +66,12 @@ class LibvirtVolumeTestCase(test.TestCase):
},
'serial': 'fake_serial',
}
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./serial').text, 'fake_serial')
@@ -92,13 +96,17 @@ class LibvirtVolumeTestCase(test.TestCase):
iqn = 'iqn.2010-10.org.openstack:%s' % name
vol = {'id': 1, 'name': name}
connection_info = self.iscsi_connection(vol, location, iqn)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, mount_device)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
'-p', location),
('iscsiadm', '-m', 'node', '-T', iqn,
@@ -126,13 +134,17 @@ class LibvirtVolumeTestCase(test.TestCase):
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
vol = {'id': 1, 'name': name}
connection_info = self.iscsi_connection(vol, location, iqn)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, mount_device)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
'-p', location),
('iscsiadm', '-m', 'node', '-T', iqn,
@@ -155,13 +167,17 @@ class LibvirtVolumeTestCase(test.TestCase):
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
connection_info = self.sheepdog_connection(vol)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
self.assertEqual(tree.find('./source').get('name'), name)
libvirt_driver.disconnect_volume(connection_info, mount_device)
libvirt_driver.disconnect_volume(connection_info, "vde")
def rbd_connection(self, volume):
return {
@@ -180,15 +196,19 @@ class LibvirtVolumeTestCase(test.TestCase):
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
connection_info = self.rbd_connection(vol)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./source/auth'), None)
libvirt_driver.disconnect_volume(connection_info, mount_device)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_auth_enabled(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
@@ -202,9 +222,13 @@ class LibvirtVolumeTestCase(test.TestCase):
connection_info['data']['auth_username'] = user
connection_info['data']['secret_type'] = secret_type
connection_info['data']['secret_uuid'] = uuid
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
@@ -213,7 +237,7 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.find('./auth').get('username'), user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), uuid)
libvirt_driver.disconnect_volume(connection_info, mount_device)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
@@ -232,9 +256,13 @@ class LibvirtVolumeTestCase(test.TestCase):
flags_user = 'bar'
self.flags(rbd_user=flags_user,
rbd_secret_uuid=flags_uuid)
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
@@ -243,7 +271,7 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.find('./auth').get('username'), flags_user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
libvirt_driver.disconnect_volume(connection_info, mount_device)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_auth_disabled(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
@@ -257,16 +285,20 @@ class LibvirtVolumeTestCase(test.TestCase):
connection_info['data']['auth_username'] = user
connection_info['data']['secret_type'] = secret_type
connection_info['data']['secret_uuid'] = uuid
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./auth'), None)
libvirt_driver.disconnect_volume(connection_info, mount_device)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
@@ -287,9 +319,13 @@ class LibvirtVolumeTestCase(test.TestCase):
flags_user = 'bar'
self.flags(rbd_user=flags_user,
rbd_secret_uuid=flags_uuid)
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
@@ -298,7 +334,7 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.find('./auth').get('username'), flags_user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
libvirt_driver.disconnect_volume(connection_info, mount_device)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_nfs_driver(self):
# NOTE(vish) exists is to make driver assume connecting worked
@@ -313,12 +349,16 @@ class LibvirtVolumeTestCase(test.TestCase):
file_path = os.path.join(export_mnt_base, name)
connection_info = {'data': {'export': export_string, 'name': name}}
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'file')
self.assertEqual(tree.find('./source').get('file'), file_path)
libvirt_driver.disconnect_volume(connection_info, mount_device)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [
('stat', export_mnt_base),
+4 -4
View File
@@ -378,10 +378,10 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
instance_ref, network_info = self._get_running_instance()
self.connection.attach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/mnt/nova/something')
'/dev/sda')
self.connection.detach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/mnt/nova/something')
'/dev/sda')
@catch_notimplementederror
def test_attach_detach_different_power_states(self):
@@ -389,11 +389,11 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.connection.power_off(instance_ref)
self.connection.attach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/mnt/nova/something')
'/dev/sda')
self.connection.power_on(instance_ref)
self.connection.detach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/mnt/nova/something')
'/dev/sda')
@catch_notimplementederror
def test_get_info(self):
+387
View File
@@ -0,0 +1,387 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handling of block device information and mapping.
This module contains helper methods for interpreting the block
device information and determining the suitable mapping to
guest devices and libvirt XML.
Throughout these methods there are a number of standard
variables / types used
* 'mapping': a dict contains the storage device mapping.
For the default disk types it will contain the following
keys & values:
'disk' -> disk_info
'disk.rescue' -> disk_info
'disk.local' -> disk_info
'disk.swap' -> disk_info
'disk.config' -> disk_info
If any of the default disks are overridden by the block
device info mappings, the hash value will be None
For any ephemeral device there will also be a dict entry
'disk.eph$NUM' -> disk_info
For any volume device there will also be a dict entry:
$path -> disk_info
Finally a special key will refer to the root device:
'root' -> disk_info
* 'disk_info': a tuple specifying disk configuration
It contains the following 3 fields
(disk bus, disk dev, device type)
* 'disk_bus': the guest bus type ('ide', 'virtio', 'scsi', etc)
* 'disk_dev': the device name 'vda', 'hdc', 'sdf', 'xvde' etc
* 'device_type': type of device eg 'disk', 'cdrom', 'floppy'
"""
from nova import block_device
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import configdrive
from nova.virt import driver
LOG = logging.getLogger(__name__)
def has_disk_dev(mapping, disk_dev):
    """Determine if a disk device name has already been used.

    Scans every disk_info value in the mapping and checks
    whether its 'dev' entry matches disk_dev.

    Returns True if the disk_dev is in use."""

    return any(info['dev'] == disk_dev for info in mapping.values())
def get_dev_prefix_for_disk_bus(disk_bus):
    """Determine the dev prefix for a disk bus.

    Determine the dev prefix to be combined with a disk
    number to form a disk_dev, eg 'hd' for the 'ide' bus
    gives disk devs like 'hda'.

    Returns the dev prefix or raises an
    exception if the disk bus is unknown."""

    # NB: Xen disks can appear as either xvda or sda and the
    # two styles are interchangeable, so we standardize on 'sd'
    prefixes = {
        "ide": "hd",
        "virtio": "vd",
        "xen": "sd",
        "scsi": "sd",
        "usb": "sd",
        "uml": "ubd",
    }
    try:
        return prefixes[disk_bus]
    except KeyError:
        raise exception.NovaException(
            _("Unable to determine disk prefix for %s") %
            disk_bus)
def get_dev_count_for_disk_bus(disk_bus):
    """Determine the number disks supported.

    Determine how many disks can be supported in
    a single VM for a particular disk bus.

    Returns the number of disks supported."""

    # IDE controllers expose at most 4 devices; every other
    # bus gets the full a-z device-letter range
    return 4 if disk_bus == "ide" else 26
def find_disk_dev_for_disk_bus(mapping, bus, last_device=False):
    """Identify a free disk dev name for a bus.

    Determines the possible disk dev names for
    the bus, and then checks them in order until
    it identifies one that is not yet used in the
    disk mapping. If 'last_device' is set, it will
    only consider the last available disk dev name.

    :param mapping: dict of name -> disk_info already assigned
    :param bus: disk bus name, eg 'ide', 'virtio'
    :param last_device: if True, only try the final dev name
        for the bus (used to pin the config drive at the end)

    Returns the chosen disk_dev name, or raises an
    exception if none is available.
    """

    dev_prefix = get_dev_prefix_for_disk_bus(bus)
    max_dev = get_dev_count_for_disk_bus(bus)
    if last_device:
        devs = [max_dev - 1]
    else:
        devs = range(max_dev)

    for idx in devs:
        disk_dev = dev_prefix + chr(ord('a') + idx)
        if not has_disk_dev(mapping, disk_dev):
            return disk_dev

    # BUG FIX: the prefix must be %-interpolated into the message;
    # previously it was passed as a stray second argument to the
    # exception constructor, leaving a literal '%s' in the error text
    raise exception.NovaException(
        _("No free disk device names for prefix '%s'") %
        dev_prefix)
def get_disk_bus_for_device_type(virt_type, device_type="disk"):
    """Determine the best disk bus to use for a device type.

    Considering the currently configured virtualization
    type, return the optimal disk_bus to use for a given
    device type. For example, for a disk on KVM it will
    return 'virtio', while for a CDROM it will return 'ide'

    Returns the disk_bus, or returns None if the device
    type is not supported for this virtualization"""

    # Preferred bus per (virt type, device type) combination;
    # anything absent from the table is unsupported
    bus_table = {
        "uml": {"disk": "uml"},
        "xen": {"cdrom": "ide", "disk": "xen"},
        "qemu": {"cdrom": "ide", "disk": "virtio"},
        "kvm": {"cdrom": "ide", "disk": "virtio"},
    }
    return bus_table.get(virt_type, {}).get(device_type)
def get_disk_bus_for_disk_dev(virt_type, disk_dev):
    """Determine the disk bus for a disk dev.

    Given a disk dev like 'hda', 'sdf', 'xvdb', etc
    guess what the most appropriate disk bus is for
    the currently configured virtualization technology

    Returns the disk bus, or raises an Exception if
    the disk dev prefix is unknown."""

    if disk_dev[:2] == 'hd':
        return "ide"
    elif disk_dev[:2] == 'sd':
        # Reverse mapping 'sd' is not reliable
        # there are many possible mappings. So
        # this picks the most likely mappings
        if virt_type == "xen":
            return "xen"
        else:
            return "scsi"
    elif disk_dev[:2] == 'vd':
        return "virtio"
    elif disk_dev[:3] == 'xvd':
        return "xen"
    elif disk_dev[:3] == 'ubd':
        return "uml"
    else:
        # BUG FIX: report the full device name, not just its first
        # character ([:1]), which hid the offending name from the error
        raise exception.NovaException(
            _("Unable to determine disk bus for '%s'") %
            disk_dev)
def get_next_disk_info(mapping, disk_bus,
                       device_type='disk',
                       last_device=False):
    """Determine the disk info for the next device on disk_bus.

    Considering the disks already listed in the disk mapping,
    determine the next available disk dev that can be assigned
    for the disk bus.

    Returns the disk_info for the next available disk."""

    return {
        'bus': disk_bus,
        'dev': find_disk_dev_for_disk_bus(mapping,
                                          disk_bus,
                                          last_device),
        'type': device_type,
    }
def get_eph_disk(ephemeral):
    """Return the image name for an ephemeral disk, eg 'disk.eph0'."""
    return 'disk.eph%s' % ephemeral['num']
def get_disk_mapping(virt_type, instance,
                     disk_bus, cdrom_bus,
                     block_device_info=None,
                     image_meta=None, rescue=False):
    """Determine how to map default disks to the virtual machine.

    This is about figuring out whether the default 'disk',
    'disk.local', 'disk.swap' and 'disk.config' images have
    been overriden by the block device mapping.

    :param virt_type: virt driver type, eg 'kvm', 'xen', 'lxc'
    :param instance: the instance to map disks for
    :param disk_bus: default bus for disks, eg 'virtio'
    :param cdrom_bus: default bus for cdroms, eg 'ide'
    :param block_device_info: block device overrides, or None
    :param image_meta: image metadata dict, or None
    :param rescue: True when building a rescue configuration

    Returns the guest disk mapping for the devices.

    BUG FIX: loop bodies below previously rebound the name
    'disk_bus', clobbering the caller-supplied default bus; any
    later get_next_disk_info() call (flavor swap, config drive)
    could then pick up the bus of the last ephemeral/volume seen
    instead of the configured default. Loop-local names are now
    distinct from the parameter."""

    inst_type = instance['instance_type']

    mapping = {}

    # LXC containers share the host filesystem directly,
    # so no block devices are ever mapped
    if virt_type == "lxc":
        return mapping

    if rescue:
        # In rescue mode the rescue image boots first and the
        # original root disk is attached second
        rescue_info = get_next_disk_info(mapping,
                                         disk_bus)
        mapping['disk.rescue'] = rescue_info
        mapping['root'] = rescue_info

        os_info = get_next_disk_info(mapping,
                                     disk_bus)
        mapping['disk'] = os_info

        return mapping

    # An ISO root image is presented as a CDROM device
    if image_meta and image_meta.get('disk_format') == 'iso':
        root_disk_bus = cdrom_bus
        root_device_type = 'cdrom'
    else:
        root_disk_bus = disk_bus
        root_device_type = 'disk'

    root_device_name = driver.block_device_info_get_root(block_device_info)
    if root_device_name is not None:
        # Caller requested a specific root dev name; infer its bus
        root_device = block_device.strip_dev(root_device_name)
        root_info = {'bus': get_disk_bus_for_disk_dev(virt_type,
                                                      root_device),
                     'dev': root_device,
                     'type': root_device_type}
    else:
        root_info = get_next_disk_info(mapping,
                                       root_disk_bus,
                                       root_device_type)
    mapping['root'] = root_info
    # Only create a local root image when no volume occupies
    # the root device slot
    if not block_device.volume_in_mapping(root_info['dev'],
                                          block_device_info):
        mapping['disk'] = root_info

    eph_info = get_next_disk_info(mapping,
                                  disk_bus)
    ephemeral_device = False
    # The default ephemeral disk is suppressed when a volume or an
    # explicit ephemeral (num 0) already claims that slot
    if not (block_device.volume_in_mapping(eph_info['dev'],
                                           block_device_info) or
            0 in [eph['num'] for eph in
                  driver.block_device_info_get_ephemerals(
                      block_device_info)]):
        if instance['ephemeral_gb'] > 0:
            ephemeral_device = True

    if ephemeral_device:
        mapping['disk.local'] = eph_info

    for eph in driver.block_device_info_get_ephemerals(
            block_device_info):
        eph_dev = block_device.strip_dev(eph['device_name'])
        eph_bus = get_disk_bus_for_disk_dev(virt_type, eph_dev)
        mapping[get_eph_disk(eph)] = {'bus': eph_bus,
                                      'dev': eph_dev,
                                      'type': 'disk'}

    swap = driver.block_device_info_get_swap(block_device_info)
    if driver.swap_is_usable(swap):
        # Swap explicitly requested via block device info
        swap_dev = block_device.strip_dev(swap['device_name'])
        swap_bus = get_disk_bus_for_disk_dev(virt_type, swap_dev)
        mapping['disk.swap'] = {'bus': swap_bus,
                                'dev': swap_dev,
                                'type': 'disk'}
    elif inst_type['swap'] > 0:
        # Swap implied by the flavor, unless a volume claims the slot
        swap_info = get_next_disk_info(mapping,
                                       disk_bus)
        if not block_device.volume_in_mapping(swap_info['dev'],
                                              block_device_info):
            mapping['disk.swap'] = swap_info

    block_device_mapping = driver.block_device_info_get_mapping(
        block_device_info)

    for vol in block_device_mapping:
        vol_dev = vol['mount_device'].rpartition("/")[2]
        vol_bus = get_disk_bus_for_disk_dev(virt_type, vol_dev)
        mapping[vol['mount_device']] = {'bus': vol_bus,
                                        'dev': vol_dev,
                                        'type': 'disk'}

    if configdrive.enabled_for(instance):
        # Config drive is pinned to the last dev name on the bus
        config_info = get_next_disk_info(mapping,
                                         disk_bus,
                                         last_device=True)
        mapping['disk.config'] = config_info

    return mapping
def get_disk_info(virt_type, instance, block_device_info=None,
                  image_meta=None, rescue=False):
    """Determine guest disk mapping info.

    This is a wrapper around get_disk_mapping, which
    also returns the chosen disk_bus and cdrom_bus.
    The returned data is in a dict

        - disk_bus: the bus for harddisks
        - cdrom_bus: the bus for CDROMs
        - mapping: the disk mapping

    Returns the disk mapping disk."""

    disk_bus = get_disk_bus_for_device_type(virt_type, "disk")
    cdrom_bus = get_disk_bus_for_device_type(virt_type, "cdrom")
    return {
        'disk_bus': disk_bus,
        'cdrom_bus': cdrom_bus,
        'mapping': get_disk_mapping(virt_type, instance,
                                    disk_bus, cdrom_bus,
                                    block_device_info,
                                    image_meta, rescue),
    }
+177 -174
View File
@@ -76,6 +76,7 @@ from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import firewall
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
@@ -261,10 +262,6 @@ MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
def _get_eph_disk(ephemeral):
return 'disk.eph' + str(ephemeral['num'])
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
@@ -295,16 +292,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._host_state = None
disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"}
if CONF.libvirt_disk_prefix:
self._disk_prefix = CONF.libvirt_disk_prefix
else:
self._disk_prefix = disk_prefix_map.get(CONF.libvirt_type, 'vd')
self.default_root_device = self._disk_prefix + 'a'
self.default_second_device = self._disk_prefix + 'b'
self.default_third_device = self._disk_prefix + 'c'
self.default_last_device = self._disk_prefix + 'z'
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
@@ -599,10 +586,10 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
mount_device)
disk_dev)
if destroy_disks:
target = libvirt_utils.get_instance_path(instance)
@@ -682,10 +669,16 @@ class LibvirtDriver(driver.ComputeDriver):
def attach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
disk_dev = mountpoint.rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
conf = self.volume_driver_method('connect_volume',
connection_info,
mount_device)
disk_info)
try:
# NOTE(vish): We can always affect config because our
@@ -701,14 +694,14 @@ class LibvirtDriver(driver.ComputeDriver):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self.volume_driver_method('disconnect_volume',
connection_info,
mount_device)
raise exception.DeviceIsBusy(device=mount_device)
connection_info,
disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self.volume_driver_method('disconnect_volume',
connection_info,
mount_device)
connection_info,
disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
@@ -730,18 +723,21 @@ class LibvirtDriver(driver.ComputeDriver):
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
xml = self.to_xml(instance, network_info,
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
mount_device = mountpoint.rpartition("/")[2]
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=mount_device)
raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
@@ -764,7 +760,7 @@ class LibvirtDriver(driver.ComputeDriver):
self.volume_driver_method('disconnect_volume',
connection_info,
mount_device)
disk_dev)
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
@@ -1031,7 +1027,10 @@ class LibvirtDriver(driver.ComputeDriver):
"""
self._destroy(instance)
xml = self.to_xml(instance, network_info,
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
@@ -1124,9 +1123,16 @@ class LibvirtDriver(driver.ComputeDriver):
'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
}
xml = self.to_xml(instance, network_info, image_meta,
rescue=rescue_images)
self._create_image(context, instance, xml, '.rescue', rescue_images,
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta,
rescue=True)
xml = self.to_xml(instance, network_info, disk_info,
image_meta, rescue=rescue_images)
self._create_image(context, instance, xml,
disk_info['mapping'],
'.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
self._destroy(instance)
@@ -1162,14 +1168,20 @@ class LibvirtDriver(driver.ComputeDriver):
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
xml = self.to_xml(instance, network_info, image_meta,
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
xml = self.to_xml(instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info)
if image_meta:
self._create_image(context, instance, xml,
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
@@ -1372,7 +1384,8 @@ class LibvirtDriver(driver.ComputeDriver):
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _create_image(self, context, instance, libvirt_xml, suffix='',
def _create_image(self, context, instance, libvirt_xml,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None, admin_pass=None):
if not suffix:
@@ -1432,8 +1445,7 @@ class LibvirtDriver(driver.ComputeDriver):
if size == 0 or suffix == '.rescue':
size = None
if not block_device.volume_in_mapping(
self.default_root_device, block_device_info):
if 'disk' in disk_mapping:
image('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=root_fname,
@@ -1448,9 +1460,7 @@ class LibvirtDriver(driver.ComputeDriver):
os_type_with_default = 'default'
ephemeral_gb = instance['ephemeral_gb']
if ephemeral_gb and not block_device.volume_in_mapping(
self.default_second_device, block_device_info):
swap_device = self.default_third_device
if 'disk.local' in disk_mapping:
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"])
@@ -1460,8 +1470,6 @@ class LibvirtDriver(driver.ComputeDriver):
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
else:
swap_device = self.default_second_device
for eph in driver.block_device_info_get_ephemerals(block_device_info):
fn = functools.partial(self._create_ephemeral,
@@ -1469,27 +1477,29 @@ class LibvirtDriver(driver.ComputeDriver):
os_type=instance["os_type"])
size = eph['size'] * 1024 * 1024 * 1024
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
image(_get_eph_disk(eph)).cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'])
image(blockinfo.get_eph_disk(eph)).cache(
fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'])
swap_mb = 0
if 'disk.swap' in disk_mapping:
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
swap_device, block_device_info)):
swap_mb = inst_type['swap']
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
swap['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * 1024 * 1024
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
if swap_mb > 0:
size = swap_mb * 1024 * 1024
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# target partition for file injection
target_partition = None
@@ -1644,11 +1654,23 @@ class LibvirtDriver(driver.ComputeDriver):
return cpu
def get_guest_disk_config(self, instance, name, disk_mapping,
image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode)
def get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type,
root_device_name, root_device):
inst_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
@@ -1660,112 +1682,64 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if image_meta and image_meta.get('disk_format') == 'iso':
root_device_type = 'cdrom'
root_device = 'hda'
else:
root_device_type = 'disk'
if CONF.libvirt_type == "uml":
default_disk_bus = "uml"
elif CONF.libvirt_type == "xen":
default_disk_bus = "xen"
else:
default_disk_bus = "virtio"
def disk_info(name, disk_dev, disk_bus=default_disk_bus,
device_type="disk"):
image = self.image_backend.image(instance, name)
return image.libvirt_info(disk_bus,
disk_dev,
device_type,
self.disk_cachemode)
if rescue:
diskrescue = disk_info('disk.rescue',
self.default_root_device,
device_type=root_device_type)
diskrescue = self.get_guest_disk_config(instance,
'disk.rescue',
disk_mapping)
devices.append(diskrescue)
diskos = disk_info('disk',
self.default_second_device)
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping)
devices.append(diskos)
else:
ebs_root = block_device.volume_in_mapping(
self.default_root_device, block_device_info)
if not ebs_root:
if root_device_type == "cdrom":
bus = "ide"
else:
bus = default_disk_bus
diskos = disk_info('disk',
root_device,
bus,
root_device_type)
if 'disk' in disk_mapping:
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping)
devices.append(diskos)
ephemeral_device = None
if not (block_device.volume_in_mapping(
self.default_second_device, block_device_info) or
0 in [eph['num'] for eph in
driver.block_device_info_get_ephemerals(
block_device_info)]):
if instance['ephemeral_gb'] > 0:
ephemeral_device = self.default_second_device
if ephemeral_device is not None:
disklocal = disk_info('disk.local', ephemeral_device)
if 'disk.local' in disk_mapping:
disklocal = self.get_guest_disk_config(instance,
'disk.local',
disk_mapping)
devices.append(disklocal)
if ephemeral_device is not None:
swap_device = self.default_third_device
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
'/dev/' + self.default_second_device})
else:
swap_device = self.default_second_device
'/dev/' + disklocal.target_dev})
for eph in driver.block_device_info_get_ephemerals(
block_device_info):
diskeph = disk_info(_get_eph_disk(eph),
block_device.strip_dev(
eph['device_name']))
diskeph = self.get_guest_disk_config(
instance,
blockinfo.get_eph_disk(eph),
disk_mapping)
devices.append(diskeph)
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
diskswap = disk_info('disk.swap',
block_device.strip_dev(
swap['device_name']))
devices.append(diskswap)
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
swap_device, block_device_info)):
diskswap = disk_info('disk.swap', swap_device)
if 'disk.swap' in disk_mapping:
diskswap = self.get_guest_disk_config(instance,
'disk.swap',
disk_mapping)
devices.append(diskswap)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_swap_device': '/dev/' + swap_device})
{'default_swap_device': '/dev/' + diskswap.target_dev})
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
info = disk_mapping[vol['mount_device']]
cfg = self.volume_driver_method('connect_volume',
connection_info,
mount_device)
info)
devices.append(cfg)
if configdrive.enabled_for(instance):
diskconfig = vconfig.LibvirtConfigGuestDisk()
diskconfig.source_type = "file"
diskconfig.driver_format = "raw"
diskconfig.driver_cache = self.disk_cachemode
diskconfig.source_path = os.path.join(
libvirt_utils.get_instance_path(instance), "disk.config")
diskconfig.target_dev = self.default_last_device
diskconfig.target_bus = default_disk_bus
if 'disk.config' in disk_mapping:
diskconfig = self.get_guest_disk_config(instance,
'disk.config',
disk_mapping,
'raw')
devices.append(diskconfig)
return devices
@@ -1782,8 +1756,8 @@ class LibvirtDriver(driver.ComputeDriver):
return sysinfo
def get_guest_config(self, instance, network_info, image_meta, rescue=None,
block_device_info=None):
def get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
@@ -1792,6 +1766,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
inst_type = instance['instance_type']
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt_type
@@ -1802,16 +1777,17 @@ class LibvirtDriver(driver.ComputeDriver):
guest.cpu = self.get_guest_cpu_config()
root_device_name = driver.block_device_info_get_root(block_device_info)
if root_device_name:
root_device = block_device.strip_dev(root_device_name)
if 'root' in disk_mapping:
root_device_name = "/dev/" + disk_mapping['root']['dev']
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
root_device = self.default_root_device
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': '/dev/' + self.default_root_device})
{'root_device_name': '/dev/' + disk_mapping['disk']['dev']})
guest.os_type = vm_mode.get_from_instance(instance)
@@ -1841,10 +1817,10 @@ class LibvirtDriver(driver.ComputeDriver):
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name or "/dev/ubda"
guest.os_root = root_device_name
else:
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
guest.os_root = root_device_name or "/dev/xvda"
guest.os_root = root_device_name
else:
guest.os_type = vm_mode.HVM
@@ -1855,7 +1831,7 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s console=ttyS0" %
(root_device_name or "/dev/vda",))
root_device_name)
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
@@ -1865,7 +1841,7 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s console=ttyS0" %
(root_device_name or "/dev/vda",))
root_device_name)
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
@@ -1895,11 +1871,10 @@ class LibvirtDriver(driver.ComputeDriver):
for cfg in self.get_guest_storage_config(instance,
image_meta,
disk_info,
rescue,
block_device_info,
inst_type,
root_device_name,
root_device):
inst_type):
guest.add_device(cfg)
for (network, mapping) in network_info:
@@ -1971,11 +1946,17 @@ class LibvirtDriver(driver.ComputeDriver):
return guest
def to_xml(self, instance, network_info, image_meta=None, rescue=None,
def to_xml(self, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
LOG.debug(_('Starting toXML method'), instance=instance)
LOG.debug(_("Start to_xml instance=%(instance)s "
"network_info=%(network_info)s "
"disk_info=%(disk_info)s "
"image_meta=%(image_meta)s rescue=%(rescue)s"
"block_device_info=%(block_device_info)s") %
locals())
conf = self.get_guest_config(instance, network_info, image_meta,
rescue, block_device_info)
disk_info, rescue, block_device_info)
xml = conf.to_xml()
if write_to_disk:
@@ -1984,7 +1965,7 @@ class LibvirtDriver(driver.ComputeDriver):
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug(_('Finished toXML method'), instance=instance)
LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s') % locals())
return xml
def _lookup_by_name(self, instance_name):
@@ -2066,10 +2047,16 @@ class LibvirtDriver(driver.ComputeDriver):
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
self.volume_driver_method('connect_volume',
connection_info,
mount_device)
disk_info)
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
@@ -2811,10 +2798,16 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
self.volume_driver_method('connect_volume',
connection_info,
mount_device)
disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
@@ -2901,8 +2894,10 @@ class LibvirtDriver(driver.ComputeDriver):
if instance_ref["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
self.to_xml(instance_ref, network_info, block_device_info,
write_to_disk=True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
self.to_xml(instance_ref, network_info, disk_info,
block_device_info, write_to_disk=True)
# libvirt.xml should be made by to_xml(), but libvirt
# does not accept to_xml() result, since uuid is not
# included in to_xml() result.
@@ -3055,10 +3050,10 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
mount_device)
disk_dev)
# copy disks to destination
# rename instance dir to +_resize at first for using
@@ -3145,13 +3140,18 @@ class LibvirtDriver(driver.ComputeDriver):
'-O', 'qcow2', info['path'], path_qcow)
utils.execute('mv', path_qcow, info['path'])
xml = self.to_xml(instance, network_info,
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
# assume _create_image do nothing if a target file exists.
# TODO(oda): injecting files is not necessary
self._create_image(context, instance, xml,
network_info=network_info,
block_device_info=None)
disk_mapping=disk_info['mapping'],
network_info=network_info,
block_device_info=None)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
@@ -3167,7 +3167,10 @@ class LibvirtDriver(driver.ComputeDriver):
inst_base_resize = inst_base + "_resize"
utils.execute('mv', inst_base_resize, inst_base)
xml = self.to_xml(instance, network_info,
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
+25 -18
View File
@@ -62,19 +62,20 @@ class LibvirtBaseVolumeDriver(object):
self.connection = connection
self.is_block_dev = is_block_dev
def connect_volume(self, connection_info, mount_device):
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
conf = vconfig.LibvirtConfigGuestDisk()
conf.driver_name = virtutils.pick_disk_driver_name(self.is_block_dev)
conf.device_type = disk_info['type']
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = mount_device
conf.target_bus = "virtio"
conf.target_dev = disk_info['dev']
conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
return conf
def disconnect_volume(self, connection_info, mount_device):
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
pass
@@ -85,10 +86,11 @@ class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
super(LibvirtVolumeDriver,
self).__init__(connection, is_block_dev=True)
def connect_volume(self, connection_info, mount_device):
def connect_volume(self, connection_info, disk_info):
"""Connect the volume to a local device."""
conf = super(LibvirtVolumeDriver,
self).connect_volume(connection_info, mount_device)
self).connect_volume(connection_info,
disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
@@ -100,10 +102,11 @@ class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
super(LibvirtFakeVolumeDriver,
self).__init__(connection, is_block_dev=True)
def connect_volume(self, connection_info, mount_device):
def connect_volume(self, connection_info, disk_info):
"""Connect the volume to a fake device."""
conf = super(LibvirtFakeVolumeDriver,
self).connect_volume(connection_info, mount_device)
self).connect_volume(connection_info,
disk_info)
conf.source_type = "network"
conf.source_protocol = "fake"
conf.source_host = "fake"
@@ -116,9 +119,10 @@ class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
super(LibvirtNetVolumeDriver,
self).__init__(connection, is_block_dev=False)
def connect_volume(self, connection_info, mount_device):
def connect_volume(self, connection_info, disk_info):
conf = super(LibvirtNetVolumeDriver,
self).connect_volume(connection_info, mount_device)
self).connect_volume(connection_info,
disk_info)
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_host = connection_info['data']['name']
@@ -163,10 +167,11 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
@lockutils.synchronized('connect_volume', 'nova-')
def connect_volume(self, connection_info, mount_device):
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
conf = super(LibvirtISCSIVolumeDriver,
self).connect_volume(connection_info, mount_device)
self).connect_volume(connection_info,
disk_info)
iscsi_properties = connection_info['data']
# NOTE(vish): If we are on the same host as nova volume, the
@@ -210,12 +215,13 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
# The /dev/disk/by-path/... node is not always present immediately
# TODO(justinsb): This retry-with-delay is a pattern, move to utils?
tries = 0
disk_dev = disk_info['dev']
while not os.path.exists(host_device):
if tries >= CONF.num_iscsi_scan_tries:
raise exception.NovaException(_("iSCSI device not found at %s")
% (host_device))
LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
LOG.warn(_("ISCSI volume not yet found at: %(disk_dev)s. "
"Will rescan & retry. Try number: %(tries)s") %
locals())
@@ -227,7 +233,7 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
time.sleep(tries ** 2)
if tries != 0:
LOG.debug(_("Found iSCSI node %(mount_device)s "
LOG.debug(_("Found iSCSI node %(disk_dev)s "
"(after %(tries)s rescans)") %
locals())
@@ -236,10 +242,10 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
return conf
@lockutils.synchronized('connect_volume', 'nova-')
def disconnect_volume(self, connection_info, mount_device):
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, mount_device)
self).disconnect_volume(connection_info, disk_dev)
iscsi_properties = connection_info['data']
# NOTE(vish): Only disconnect from the target if no luns from the
# target are in use.
@@ -265,10 +271,11 @@ class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
super(LibvirtNFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
def connect_volume(self, connection_info, mount_device):
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
conf = super(LibvirtNFSVolumeDriver,
self).connect_volume(connection_info, mount_device)
self).connect_volume(connection_info,
disk_info)
path = self._ensure_mounted(connection_info['data']['export'])
path = os.path.join(path, connection_info['data']['name'])
conf.source_type = 'file'