diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py
index 052b4115b1..a797a74e14 100644
--- a/nova/tests/virt/libvirt/test_config.py
+++ b/nova/tests/virt/libvirt/test_config.py
@@ -1166,6 +1166,47 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
""")
+ def test_config_8021Qbh_hostdev(self):
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "hostdev"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.source_dev = "0000:0a:00.1"
+ obj.vporttype = "802.1Qbh"
+ obj.add_vport_param("profileid", "MyPortProfile")
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+            <interface type="hostdev" managed="yes">
+              <mac address="DE:AD:BE:EF:CA:FE"/>
+              <source>
+                <address type="pci" domain="0x0000" bus="0x0a" slot="0x00" function="0x1"/>
+              </source>
+              <virtualport type="802.1Qbh">
+                <parameters profileid="MyPortProfile"/>
+              </virtualport>
+            </interface>""")
+
+ def test_config_hw_veb_hostdev(self):
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "hostdev"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.source_dev = "0000:0a:00.1"
+ obj.vlan = "100"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+            <interface type="hostdev" managed="yes">
+              <mac address="DE:AD:BE:EF:CA:FE"/>
+              <source>
+                <address type="pci" domain="0x0000" bus="0x0a" slot="0x00" function="0x1"/>
+              </source>
+              <vlan>
+                <tag id="100"/>
+              </vlan>
+            </interface>""")
+
class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py
index fe5e57f03e..d5901a75f4 100644
--- a/nova/tests/virt/libvirt/test_driver.py
+++ b/nova/tests/virt/libvirt/test_driver.py
@@ -43,6 +43,7 @@ from nova.compute import flavors
from nova.compute import manager
from nova.compute import power_state
from nova.compute import task_states
+from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
@@ -221,6 +222,9 @@ class FakeVirtDomain(object):
def attachDeviceFlags(self, xml, flags):
pass
+ def attachDevice(self, xml):
+ pass
+
def detachDeviceFlags(self, xml, flags):
pass
@@ -2663,6 +2667,7 @@ class LibvirtConnTestCase(test.TestCase,
address='0000:00:00.1',
compute_id=compute_ref['id'],
instance_uuid=instance.uuid,
+ request_id=None,
extra_info=jsonutils.dumps({}))
db.pci_device_update(self.context, pci_device_info['compute_node_id'],
pci_device_info['address'], pci_device_info)
@@ -2701,6 +2706,7 @@ class LibvirtConnTestCase(test.TestCase,
address='0000:00:00.2',
compute_id=compute_ref['id'],
instance_uuid=instance.uuid,
+ request_id=None,
extra_info=jsonutils.dumps({}))
db.pci_device_update(self.context, pci_device_info['compute_node_id'],
pci_device_info['address'], pci_device_info)
@@ -7123,6 +7129,63 @@ class LibvirtConnTestCase(test.TestCase,
shutdown_attempts=1,
succeeds=False)
+ @mock.patch.object(FakeVirtDomain, 'attachDevice')
+ @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
+ @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
+ def test_attach_sriov_ports(self,
+ mock_get_image_metadata,
+ mock_ID,
+ mock_attachDevice):
+ instance = db.instance_create(self.context, self.test_instance)
+ network_info = _fake_network_info(self.stubs, 1)
+ network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ domain = FakeVirtDomain()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ conn._attach_sriov_ports(self.context, instance, domain, network_info)
+ mock_get_image_metadata.assert_called_once_with(self.context,
+ conn._image_api, instance['image_ref'], instance)
+ self.assertTrue(mock_attachDevice.called)
+
+ @mock.patch.object(FakeVirtDomain, 'attachDevice')
+ @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
+ @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
+ def test_attach_sriov_ports_with_info_cache(self,
+ mock_get_image_metadata,
+ mock_ID,
+ mock_attachDevice):
+ instance = db.instance_create(self.context, self.test_instance)
+ network_info = _fake_network_info(self.stubs, 1)
+ network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ instance.info_cache.network_info = network_info
+ domain = FakeVirtDomain()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ conn._attach_sriov_ports(self.context, instance, domain, None)
+ mock_get_image_metadata.assert_called_once_with(self.context,
+ conn._image_api, instance['image_ref'], instance)
+ self.assertTrue(mock_attachDevice.called)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_has_min_version', return_value=True)
+ @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
+ @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
+ def test_detach_sriov_ports(self,
+ mock_get_image_metadata,
+ mock_detachDeviceFlags,
+ mock_has_min_version):
+ instance = db.instance_create(self.context, self.test_instance)
+ network_info = _fake_network_info(self.stubs, 1)
+ network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ instance.info_cache.network_info = network_info
+ domain = FakeVirtDomain()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ conn._detach_sriov_ports(instance, domain)
+ mock_get_image_metadata.assert_called_once_with(mock.ANY,
+ conn._image_api, instance['image_ref'], instance)
+ self.assertTrue(mock_detachDeviceFlags.called)
+
def test_resume(self):
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
@@ -9870,7 +9933,6 @@ class IptablesFirewallTestCase(test.TestCase):
from nova.network import linux_net
linux_net.iptables_manager.execute = fake_iptables_execute
- from nova.compute import utils as compute_utils # noqa
self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
lambda instance: network_model)
diff --git a/nova/tests/virt/libvirt/test_vif.py b/nova/tests/virt/libvirt/test_vif.py
index 3c4e409c5c..e764fda695 100644
--- a/nova/tests/virt/libvirt/test_vif.py
+++ b/nova/tests/virt/libvirt/test_vif.py
@@ -178,16 +178,31 @@ class LibvirtVifTestCase(test.TestCase):
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBH,
- devname='tap-xxx-yyy-zzz',
+ vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
- qbh_params=network_model.VIF8021QbhParams(
- profileid="xxx-yyy-zzz"))
+ details={
+ network_model.VIF_DETAILS_PROFILEID:
+ 'MyPortProfile'},
+ profile={'pci_vendor_info': '1137:0043',
+ 'pci_slot': '0000:0a:00.1',
+ 'physical_network': 'phynet1'})
+
+ vif_hw_veb = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_8021,
+ type=network_model.VIF_TYPE_HW_VEB,
+ vnic_type=network_model.VNIC_TYPE_DIRECT,
+ ovs_interfaceid=None,
+ details={
+ network_model.VIF_DETAILS_VLAN: '100'},
+ profile={'pci_vendor_info': '1137:0043',
+ 'pci_slot': '0000:0a:00.1',
+ 'physical_network': 'phynet1'})
vif_8021qbg = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBG,
- devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None,
qbg_params=network_model.VIF8021QbgParams(
managerid="xxx-yyy-zzz",
@@ -310,6 +325,20 @@ class LibvirtVifTestCase(test.TestCase):
driver = node.find("driver").get("name")
self.assertEqual(driver, driver_want)
+ def _assertTypeAndPciEquals(self, node, type, vif):
+ self.assertEqual(node.get("type"), type)
+ address = node.find("source").find("address")
+ addr_type = address.get("type")
+ self.assertEqual("pci", addr_type)
+ pci_slot = "%(domain)s:%(bus)s:%(slot)s.%(func)s" % {
+ 'domain': address.get("domain")[2:],
+ 'bus': address.get("bus")[2:],
+ 'slot': address.get("slot")[2:],
+ 'func': address.get("function")[2:]}
+
+ pci_slot_want = vif['profile']['pci_slot']
+ self.assertEqual(pci_slot, pci_slot_want)
+
def _get_conf(self):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
@@ -455,7 +484,6 @@ class LibvirtVifTestCase(test.TestCase):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
self._test_model_qemu(
self.vif_bridge,
- self.vif_8021qbh,
self.vif_8021qbg,
self.vif_iovisor,
self.vif_mlnx,
@@ -468,7 +496,6 @@ class LibvirtVifTestCase(test.TestCase):
self.vif_bridge,
self.vif_ovs,
self.vif_ivs,
- self.vif_8021qbh,
self.vif_8021qbg,
self.vif_iovisor,
self.vif_mlnx,
@@ -851,21 +878,31 @@ class LibvirtVifTestCase(test.TestCase):
d = vif.LibvirtGenericVIFDriver(self._get_conn())
xml = self._get_instance_xml(d, self.vif_8021qbh)
node = self._get_node(xml)
- self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
+ self._assertTypeAndPciEquals(node, "hostdev", self.vif_8021qbh)
self._assertMacEquals(node, self.vif_8021qbh)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbh")
profile_id_found = False
for p_elem in vp.findall("parameters"):
- wantparams = self.vif_8021qbh['qbh_params']
+ details = self.vif_8021qbh["details"]
profile_id = p_elem.get("profileid", None)
if profile_id:
self.assertEqual(profile_id,
- wantparams['profileid'])
+ details[network_model.VIF_DETAILS_PROFILEID])
profile_id_found = True
self.assertTrue(profile_id_found)
+ def test_hw_veb_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_hw_veb)
+ node = self._get_node(xml)
+ self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb)
+ self._assertMacEquals(node, self.vif_hw_veb)
+ vlan = node.find("vlan").find("tag").get("id")
+ vlan_want = self.vif_hw_veb["details"]["vlan"]
+ self.assertEqual(vlan, vlan_want)
+
def test_generic_iovisor_driver(self):
d = vif.LibvirtGenericVIFDriver(self._get_conn())
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 3f375bdb09..a7a4ac0e73 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -28,6 +28,7 @@ import time
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import units
+from nova.pci import pci_utils
from nova.virt import hardware
from lxml import etree
@@ -986,6 +987,11 @@ class LibvirtConfigGuestSnapshotDisk(LibvirtConfigObject):
elif self.source_type == 'network':
self.source_protocol = c.get('protocol')
self.source_name = c.get('name')
+ for sub in c.getchildren():
+ if sub.tag == 'host':
+ self.source_hosts.append(sub.get('name'))
+ self.source_ports.append(sub.get('port'))
+
elif c.tag == 'serial':
self.serial = c.text
@@ -1082,11 +1088,14 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
self.vif_outbound_peak = None
self.vif_outbound_burst = None
self.vif_outbound_average = None
+ self.vlan = None
def format_dom(self):
dev = super(LibvirtConfigGuestInterface, self).format_dom()
dev.set("type", self.net_type)
+ if self.net_type == "hostdev":
+ dev.set("managed", "yes")
dev.append(etree.Element("mac", address=self.mac_addr))
if self.model:
dev.append(etree.Element("model", type=self.model))
@@ -1100,9 +1109,26 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
elif self.net_type == "direct":
dev.append(etree.Element("source", dev=self.source_dev,
mode=self.source_mode))
+ elif self.net_type == "hostdev":
+ source_elem = etree.Element("source")
+ domain, bus, slot, func = \
+ pci_utils.get_pci_address_fields(self.source_dev)
+ addr_elem = etree.Element("address", type='pci')
+ addr_elem.set("domain", "0x%s" % (domain))
+ addr_elem.set("bus", "0x%s" % (bus))
+ addr_elem.set("slot", "0x%s" % (slot))
+ addr_elem.set("function", "0x%s" % (func))
+ source_elem.append(addr_elem)
+ dev.append(source_elem)
else:
dev.append(etree.Element("source", bridge=self.source_dev))
+ if self.vlan and self.net_type in ("direct", "hostdev"):
+ vlan_elem = etree.Element("vlan")
+ tag_elem = etree.Element("tag", id=self.vlan)
+ vlan_elem.append(tag_elem)
+ dev.append(vlan_elem)
+
if self.target_dev is not None:
dev.append(etree.Element("target", dev=self.target_dev))
diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py
index 25e2a1c401..8ed55a2564 100644
--- a/nova/virt/libvirt/designer.py
+++ b/nova/virt/libvirt/designer.py
@@ -19,6 +19,8 @@ This module provides helper APIs for populating the config.py
classes based on common operational needs / policies
"""
+from nova.pci import pci_utils
+
def set_vif_guest_frontend_config(conf, mac, model, driver):
"""Populate a LibvirtConfigGuestInterface instance
@@ -89,21 +91,46 @@ def set_vif_host_backend_802qbg_config(conf, devname, managerid,
conf.target_dev = tapname
-def set_vif_host_backend_802qbh_config(conf, devname, profileid,
+def set_vif_host_backend_802qbh_config(conf, net_type, devname, profileid,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbh device.
"""
- conf.net_type = "direct"
- conf.source_dev = devname
- conf.source_mode = "vepa"
+ conf.net_type = net_type
+ if net_type == 'direct':
+ conf.source_mode = 'passthrough'
+ conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
+ conf.driver_name = 'vhost'
+ else:
+ conf.source_dev = devname
+ conf.model = None
conf.vporttype = "802.1Qbh"
conf.add_vport_param("profileid", profileid)
if tapname:
conf.target_dev = tapname
+def set_vif_host_backend_hw_veb(conf, net_type, devname, vlan,
+ tapname=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for an device that supports hardware
+ virtual ethernet bridge.
+ """
+
+ conf.net_type = net_type
+ if net_type == 'direct':
+ conf.source_mode = 'passthrough'
+ conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
+ conf.driver_name = 'vhost'
+ else:
+ conf.source_dev = devname
+ conf.model = None
+ conf.vlan = vlan
+ if tapname:
+ conf.target_dev = tapname
+
+
def set_vif_host_backend_direct_config(conf, devname):
"""Populate a LibvirtConfigGuestInterface instance
with direct Interface.
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 2073e97e50..320e410a28 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -66,6 +66,7 @@ from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
+from nova.network import model as network_model
from nova import objects
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
@@ -1678,6 +1679,7 @@ class LibvirtDriver(driver.ComputeDriver):
if state == power_state.RUNNING or state == power_state.PAUSED:
self._detach_pci_devices(virt_dom,
pci_manager.get_instance_pci_devs(instance))
+ self._detach_sriov_ports(instance, virt_dom)
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(instance,
@@ -1717,6 +1719,7 @@ class LibvirtDriver(driver.ComputeDriver):
if new_dom is not None:
self._attach_pci_devices(new_dom,
pci_manager.get_instance_pci_devs(instance))
+ self._attach_sriov_ports(context, instance, new_dom)
LOG.info(_LI("Snapshot extracted, beginning image upload"),
instance=instance)
@@ -2273,7 +2276,7 @@ class LibvirtDriver(driver.ComputeDriver):
# FLAG defines depending on how long the get_info
# call takes to return.
self._prepare_pci_devices_for_use(
- pci_manager.get_instance_pci_devs(instance))
+ pci_manager.get_instance_pci_devs(instance, 'all'))
for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance["name"])
state = LIBVIRT_POWER_STATE[dom.info()[0]]
@@ -2354,7 +2357,7 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info, reboot=True,
vifs_already_plugged=True)
self._prepare_pci_devices_for_use(
- pci_manager.get_instance_pci_devs(instance))
+ pci_manager.get_instance_pci_devs(instance, 'all'))
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
@@ -2472,6 +2475,7 @@ class LibvirtDriver(driver.ComputeDriver):
dom = self._lookup_by_name(instance['name'])
self._detach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
+ self._detach_sriov_ports(instance, dom)
dom.managedSave(0)
def resume(self, context, instance, network_info, block_device_info=None):
@@ -2483,6 +2487,7 @@ class LibvirtDriver(driver.ComputeDriver):
vifs_already_plugged=True)
self._attach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
+ self._attach_sriov_ports(context, instance, dom, network_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
@@ -3174,6 +3179,70 @@ class LibvirtDriver(driver.ComputeDriver):
{'dev': pci_devs, 'dom': dom.ID()})
raise
+ def _prepare_args_for_get_config(self, context, instance):
+ flavor = objects.Flavor.get_by_id(context,
+ instance['instance_type_id'])
+ image_ref = instance['image_ref']
+ image_meta = compute_utils.get_image_metadata(
+ context, self._image_api, image_ref, instance)
+ return flavor, image_meta
+
+ @staticmethod
+ def _has_sriov_port(network_info):
+ for vif in network_info:
+ if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
+ return True
+ return False
+
+ def _attach_sriov_ports(self, context, instance, dom, network_info=None):
+ if network_info is None:
+ network_info = instance.info_cache.network_info
+ if network_info is None:
+ return
+
+ if self._has_sriov_port(network_info):
+ flavor, image_meta = self._prepare_args_for_get_config(context,
+ instance)
+ for vif in network_info:
+ if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
+ cfg = self.vif_driver.get_config(instance,
+ vif,
+ image_meta,
+ flavor,
+ CONF.libvirt.virt_type)
+ LOG.debug('Attaching SR-IOV port %(port)s to %(dom)s',
+ {'port': vif, 'dom': dom.ID()})
+ dom.attachDevice(cfg.to_xml())
+
+ def _detach_sriov_ports(self, instance, dom):
+ network_info = instance.info_cache.network_info
+ if network_info is None:
+ return
+
+ context = nova_context.get_admin_context()
+ if self._has_sriov_port(network_info):
+ # for libvirt version < 1.1.1, this is race condition
+ # so forbid detach if it's an older version
+ if not self._has_min_version(
+ MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
+ reason = (_("Detaching SR-IOV ports with"
+ " libvirt < %(ver)s is not permitted") %
+ {'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
+ raise exception.PciDeviceDetachFailed(reason=reason,
+ dev=network_info)
+
+ flavor, image_meta = self._prepare_args_for_get_config(context,
+ instance)
+ for vif in network_info:
+ if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
+ cfg = self.vif_driver.get_config(instance,
+ vif,
+ image_meta,
+ flavor,
+ CONF.libvirt.virt_type)
+ dom.detachDeviceFlags(cfg.to_xml(),
+ libvirt.VIR_DOMAIN_AFFECT_LIVE)
+
def _set_host_enabled(self, enabled,
disable_reason=DISABLE_REASON_UNDEFINED):
"""Enables / Disables the compute service on this host.
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 3abe5e8f57..71404f08ac 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -265,10 +265,34 @@ class LibvirtGenericVIFDriver(object):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
- params = vif["qbh_params"]
+ profile = vif["profile"]
+ vif_details = vif["details"]
+ net_type = 'direct'
+ if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
+ net_type = 'hostdev'
+
designer.set_vif_host_backend_802qbh_config(
- conf, vif['network'].get_meta('interface'),
- params['profileid'])
+ conf, net_type, profile['pci_slot'],
+ vif_details[network_model.VIF_DETAILS_PROFILEID])
+
+ designer.set_vif_bandwidth_config(conf, inst_type)
+
+ return conf
+
+ def get_config_hw_veb(self, instance, vif, image_meta,
+ inst_type, virt_type):
+ conf = self.get_base_config(instance, vif, image_meta,
+ inst_type, virt_type)
+
+ profile = vif["profile"]
+ vif_details = vif["details"]
+ net_type = 'direct'
+ if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
+ net_type = 'hostdev'
+
+ designer.set_vif_host_backend_hw_veb(
+ conf, net_type, profile['pci_slot'],
+ vif_details[network_model.VIF_DETAILS_VLAN])
designer.set_vif_bandwidth_config(conf, inst_type)
@@ -459,6 +483,9 @@ class LibvirtGenericVIFDriver(object):
def plug_802qbh(self, instance, vif):
pass
+ def plug_hw_veb(self, instance, vif):
+ pass
+
def plug_midonet(self, instance, vif):
"""Plug into MidoNet's network port
@@ -604,6 +631,9 @@ class LibvirtGenericVIFDriver(object):
def unplug_802qbh(self, instance, vif):
pass
+ def unplug_hw_veb(self, instance, vif):
+ pass
+
def unplug_midonet(self, instance, vif):
"""Unplug from MidoNet network port