Merge from trunk.

This commit is contained in:
Naveed Massjouni
2011-07-11 15:13:37 -04:00
159 changed files with 8755 additions and 3353 deletions
+5
View File
@@ -47,3 +47,8 @@
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
<vishvananda@gmail.com> <root@ubuntu>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
<ilyaalekseyev@acm.org> <ilya@oscloud.ru>
<reldan@oscloud.ru> <enugaev@griddynamics.com>
<kshileev@gmail.com> <kshileev@griddynamics.com>
<nsokolov@griddynamics.com> <nsokolov@griddynamics.net>
+6 -2
View File
@@ -20,16 +20,17 @@ Dan Prince <dan.prince@rackspace.com>
Dave Walker <DaveWalker@ubuntu.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devendra Modium <dmodium@isi.edu>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
Eldar Nugaev <enugaev@griddynamics.com>
Eldar Nugaev <reldan@oscloud.ru>
Eric Day <eday@oddments.org>
Eric Windisch <eric@cloudscaling.com>
Ewan Mellor <ewan.mellor@citrix.com>
Gabe Westmaas <gabe.westmaas@rackspace.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com>
Ilya Alekseyev <ilyaalekseyev@acm.org>
Isaku Yamahata <yamahata@valinux.co.jp>
Jason Cannavale <jason.cannavale@rackspace.com>
Jason Koelker <jason@koelker.net>
@@ -43,6 +44,7 @@ John Dewey <john@dewey.ws>
John Tran <jtran@attinteractive.com>
Jonathan Bryce <jbryce@jbryce.com>
Jordan Rinke <jordan@openstack.org>
Joseph Suh <jsuh@isi.edu>
Josh Durgin <joshd@hq.newdream.net>
Josh Kearney <josh@jk0.org>
Josh Kleinpeter <josh@kleinpeter.org>
@@ -53,6 +55,7 @@ Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Kevin Bringard <kbringard@attinteractive.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Kirill Shileev <kshileev@gmail.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Lvov Maxim <usrleon@gmail.com>
@@ -67,6 +70,7 @@ MORITA Kazutaka <morita.kazutaka@gmail.com>
Muneyuki Noguchi <noguchimn@nttdata.co.jp>
Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
Naveed Massjouni <naveedm9@gmail.com>
Nikolay Sokolov <nsokolov@griddynamics.com>
Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
Paul Voccio <paul@openstack.org>
Renuka Apte <renuka.apte@citrix.com>
+1
View File
@@ -23,6 +23,7 @@ include nova/compute/interfaces.template
include nova/console/xvp.conf.template
include nova/db/sqlalchemy/migrate_repo/migrate.cfg
include nova/db/sqlalchemy/migrate_repo/README
include nova/db/sqlalchemy/migrate_repo/versions/*.sql
include nova/virt/interfaces.template
include nova/virt/libvirt*.xml.template
include nova/virt/cpuinfo.xml.template
+116
View File
@@ -0,0 +1,116 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cron script to generate usage notifications for instances neither created
nor destroyed in a given time period.
Together with the notifications generated by compute on instance
create/delete/resize, over that time period, this allows an external
system consuming usage notification feeds to calculate instance usage
for each tenant.
Time periods are specified like so:
<number>[mdy]
1m = previous month. If the script is run April 1, it will generate usages
for March 1 thru March 31.
3m = 3 previous months.
90d = previous 90 days.
1y = previous year. If run on Jan 1, it generates usages for
Jan 1 thru Dec 31 of the previous year.
"""
import datetime
import gettext
import os
import sys
import time
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.notifier import api as notifier_api
FLAGS = flags.FLAGS
flags.DEFINE_string('instance_usage_audit_period', '1m',
'time period to generate instance usages for.')
def time_period(period):
    """Parse a time period spec into a ``(begin, end)`` datetime pair.

    The spec is ``<number><unit>`` where unit is one of:
        ``m`` - whole calendar months (e.g. ``1m`` is the previous month)
        ``d`` - days ending today (e.g. ``90d`` is the previous 90 days)
        ``y`` - whole calendar years (e.g. ``1y`` is the previous year)

    Returns a tuple of naive datetimes ``(begin, end)`` where ``end`` is
    exclusive (the first instant NOT included in the period).

    Raises ValueError if the spec is empty, has an unknown unit, or a
    non-numeric count.
    """
    if not period:
        raise ValueError('Time period must be in the form <number>[mdy]')
    today = datetime.date.today()
    unit = period[-1]
    if unit not in 'mdy':
        raise ValueError('Time period must be m, d, or y')
    n = int(period[:-1])  # raises ValueError for a non-numeric count
    if unit == 'm':
        # Walk back n whole calendar months, borrowing from prior years
        # when the month count crosses a year boundary.
        year = today.year - (n // 12)
        n = n % 12
        if n >= today.month:
            year -= 1
            month = 12 + (today.month - n)
        else:
            month = today.month - n
        begin = datetime.datetime(day=1, month=month, year=year)
        end = datetime.datetime(day=1, month=today.month, year=today.year)
    elif unit == 'y':
        begin = datetime.datetime(day=1, month=1, year=today.year - n)
        end = datetime.datetime(day=1, month=1, year=today.year)
    else:  # unit == 'd' -- the only remaining value after validation
        b = today - datetime.timedelta(days=n)
        begin = datetime.datetime(day=b.day, month=b.month, year=b.year)
        end = datetime.datetime(day=today.day,
                                month=today.month,
                                year=today.year)
    return (begin, end)
# Script entry point: emit one 'compute.instance.exists' usage notification
# per instance that was active during the configured audit period.
if __name__ == '__main__':
# Load the default flag file, parse command-line flags, set up logging.
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
begin, end = time_period(FLAGS.instance_usage_audit_period)
print "Creating usages for %s until %s" % (str(begin), str(end))
# All instances active at any point within [begin, end).
instances = db.instance_get_active_by_window(context.get_admin_context(),
begin,
end)
print "%s instances" % len(instances)
for instance_ref in instances:
# NOTE(review): 'audit_period_begining' is misspelled, but it must match
# the keyword that utils.usage_from_instance expects -- confirm there
# before correcting the spelling on either side.
usage_info = utils.usage_from_instance(instance_ref,
audit_period_begining=str(begin),
audit_period_ending=str(end))
# Publish the usage record so external billing/metering systems can
# account for instances that were neither created nor destroyed in
# this period (compute emits events only on create/delete/resize).
notifier_api.notify('compute.%s' % FLAGS.host,
'compute.instance.exists',
notifier_api.INFO,
usage_info)
+3 -2
View File
@@ -137,8 +137,9 @@ if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
server = wsgi.Server()
acp_port = FLAGS.ajax_console_proxy_port
acp = AjaxConsoleProxy()
acp.register_listeners()
server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0')
server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
server.start()
server.wait()
+32 -29
View File
@@ -1,5 +1,4 @@
#!/usr/bin/env python
# pylint: disable=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -18,44 +17,48 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova API."""
"""Starter script for Nova API.
Starts both the EC2 and OpenStack APIs in separate processes.
"""
import gettext
import os
import signal
import sys
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
sys.argv[0]), os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
import nova.service
import nova.utils
from nova import flags
from nova import log as logging
from nova import service
from nova import utils
from nova import version
from nova import wsgi
LOG = logging.getLogger('nova.api')
FLAGS = flags.FLAGS
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
LOG.audit(_("Starting nova-api node (version %s)"),
version.version_string_with_vcs())
LOG.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
flag_get = FLAGS.get(flag, None)
LOG.debug("%(flag)s : %(flag_get)s" % locals())
service = service.serve_wsgi(service.ApiService)
service.wait()
def main():
"""Launch EC2 and OSAPI services."""
nova.utils.Bootstrapper.bootstrap_binary(sys.argv)
launcher = nova.service.Launcher()
for api in FLAGS.enabled_apis:
service = nova.service.WSGIService(api)
launcher.launch_service(service)
signal.signal(signal.SIGTERM, lambda *_: launcher.stop())
try:
launcher.wait()
except KeyboardInterrupt:
launcher.stop()
if __name__ == '__main__':
sys.exit(main())
+2 -6
View File
@@ -59,14 +59,12 @@ def add_lease(mac, ip_address, _hostname, _interface):
LOG.debug(_("leasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.lease_fixed_ip(context.get_admin_context(),
mac,
ip_address)
else:
rpc.cast(context.get_admin_context(),
"%s.%s" % (FLAGS.network_topic, FLAGS.host),
{"method": "lease_fixed_ip",
"args": {"mac": mac,
"address": ip_address}})
"args": {"address": ip_address}})
def old_lease(mac, ip_address, hostname, interface):
@@ -81,14 +79,12 @@ def del_lease(mac, ip_address, _hostname, _interface):
LOG.debug(_("releasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.release_fixed_ip(context.get_admin_context(),
mac,
ip_address)
else:
rpc.cast(context.get_admin_context(),
"%s.%s" % (FLAGS.network_topic, FLAGS.host),
{"method": "release_fixed_ip",
"args": {"mac": mac,
"address": ip_address}})
"args": {"address": ip_address}})
def init_leases(interface):
+5 -2
View File
@@ -93,6 +93,9 @@ if __name__ == '__main__':
with_req = direct.PostParamsMiddleware(with_json)
with_auth = direct.DelegatedAuthMiddleware(with_req)
server = wsgi.Server()
server.start(with_auth, FLAGS.direct_port, host=FLAGS.direct_host)
server = wsgi.Server("Direct API",
with_auth,
host=FLAGS.direct_host,
port=FLAGS.direct_port)
server.start()
server.wait()
+58 -31
View File
@@ -56,11 +56,11 @@
import gettext
import glob
import json
import netaddr
import os
import sys
import time
import IPy
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -172,17 +172,23 @@ class VpnCommands(object):
def change(self, project_id, ip, port):
"""Change the ip and port for a vpn.
this will update all networks associated with a project
not sure if that's the desired behavior or not, patches accepted
args: project, ip, port"""
# TODO(tr3buchet): perhaps this shouldn't update all networks
# associated with a project in the future
project = self.manager.get_project(project_id)
if not project:
print 'No project %s' % (project_id)
return
admin = context.get_admin_context()
network_ref = db.project_get_network(admin, project_id)
db.network_update(admin,
network_ref['id'],
{'vpn_public_address': ip,
'vpn_public_port': int(port)})
admin_context = context.get_admin_context()
networks = db.project_get_networks(admin_context, project_id)
for network in networks:
db.network_update(admin_context,
network['id'],
{'vpn_public_address': ip,
'vpn_public_port': int(port)})
class ShellCommands(object):
@@ -446,12 +452,13 @@ class ProjectCommands(object):
def scrub(self, project_id):
"""Deletes data associated with project
arguments: project_id"""
ctxt = context.get_admin_context()
network_ref = db.project_get_network(ctxt, project_id)
db.network_disassociate(ctxt, network_ref['id'])
groups = db.security_group_get_by_project(ctxt, project_id)
admin_context = context.get_admin_context()
networks = db.project_get_networks(admin_context, project_id)
for network in networks:
db.network_disassociate(admin_context, network['id'])
groups = db.security_group_get_by_project(admin_context, project_id)
for group in groups:
db.security_group_destroy(ctxt, group['id'])
db.security_group_destroy(admin_context, group['id'])
def zipfile(self, project_id, user_id, filename='nova.zip'):
"""Exports credentials for project to a zip file
@@ -505,7 +512,7 @@ class FixedIpCommands(object):
instance = fixed_ip['instance']
hostname = instance['hostname']
host = instance['host']
mac_address = instance['mac_address']
mac_address = fixed_ip['mac_address']['address']
print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (
fixed_ip['network']['cidr'],
fixed_ip['address'],
@@ -515,24 +522,24 @@ class FixedIpCommands(object):
class FloatingIpCommands(object):
"""Class for managing floating ip."""
def create(self, host, range):
"""Creates floating ips for host by range
arguments: host ip_range"""
for address in IPy.IP(range):
def create(self, range):
"""Creates floating ips for zone by range
arguments: ip_range"""
for address in netaddr.IPNetwork(range):
db.floating_ip_create(context.get_admin_context(),
{'address': str(address),
'host': host})
{'address': str(address)})
def delete(self, ip_range):
"""Deletes floating ips by range
arguments: range"""
for address in IPy.IP(ip_range):
for address in netaddr.IPNetwork(ip_range):
db.floating_ip_destroy(context.get_admin_context(),
str(address))
def list(self, host=None):
"""Lists all floating ips (optionally by host)
arguments: [host]"""
arguments: [host]
Note: if host is given, only active floating IPs are returned"""
ctxt = context.get_admin_context()
if host is None:
floating_ips = db.floating_ip_get_all(ctxt)
@@ -550,10 +557,23 @@ class FloatingIpCommands(object):
class NetworkCommands(object):
"""Class for managing networks."""
def create(self, fixed_range=None, num_networks=None, network_size=None,
vlan_start=None, vpn_start=None, fixed_range_v6=None,
gateway_v6=None, label='public'):
"""Creates fixed ips for host by range"""
def create(self, label=None, fixed_range=None, num_networks=None,
network_size=None, vlan_start=None,
vpn_start=None, fixed_range_v6=None, gateway_v6=None,
flat_network_bridge=None, bridge_interface=None):
"""Creates fixed ips for host by range
arguments: label, fixed_range, [num_networks=FLAG],
[network_size=FLAG], [vlan_start=FLAG],
[vpn_start=FLAG], [fixed_range_v6=FLAG], [gateway_v6=FLAG],
[flat_network_bridge=FLAG], [bridge_interface=FLAG]
If you wish to use a later argument fill in the gaps with 0s
Ex: network create private 10.0.0.0/8 1 15 0 0 0 0 xenbr1 eth1
network create private 10.0.0.0/8 1 15
"""
if not label:
msg = _('a label (ex: public) is required to create networks.')
print msg
raise TypeError(msg)
if not fixed_range:
msg = _('Fixed range in the form of 10.0.0.0/8 is '
'required to create networks.')
@@ -569,11 +589,17 @@ class NetworkCommands(object):
vpn_start = FLAGS.vpn_start
if not fixed_range_v6:
fixed_range_v6 = FLAGS.fixed_range_v6
if not flat_network_bridge:
flat_network_bridge = FLAGS.flat_network_bridge
if not bridge_interface:
bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface
if not gateway_v6:
gateway_v6 = FLAGS.gateway_v6
net_manager = utils.import_object(FLAGS.network_manager)
try:
net_manager.create_networks(context.get_admin_context(),
label=label,
cidr=fixed_range,
num_networks=int(num_networks),
network_size=int(network_size),
@@ -581,7 +607,8 @@ class NetworkCommands(object):
vpn_start=int(vpn_start),
cidr_v6=fixed_range_v6,
gateway_v6=gateway_v6,
label=label)
bridge=flat_network_bridge,
bridge_interface=bridge_interface)
except ValueError, e:
print e
raise e
@@ -617,7 +644,7 @@ class VmCommands(object):
:param host: show all instance on specified host.
:param instance: show specificed instance.
"""
print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \
" %-10s %-10s %-10s %-5s" % (
_('instance'),
_('node'),
@@ -639,14 +666,14 @@ class VmCommands(object):
context.get_admin_context(), host)
for instance in instances:
print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \
" %-10s %-10s %-10s %-5d" % (
instance['hostname'],
instance['host'],
instance['instance_type'],
instance['instance_type'].name,
instance['state_description'],
instance['launched_at'],
instance['image_id'],
instance['image_ref'],
instance['kernel_id'],
instance['ramdisk_id'],
instance['project_id'],
@@ -878,7 +905,7 @@ class InstanceTypeCommands(object):
try:
instance_types.create(name, memory, vcpus, local_gb,
flavorid, swap, rxtx_quota, rxtx_cap)
except exception.InvalidInput:
except exception.InvalidInput, e:
print "Must supply valid parameters to create instance_type"
print e
sys.exit(1)
+5 -2
View File
@@ -50,6 +50,9 @@ if __name__ == '__main__':
FLAGS(sys.argv)
logging.setup()
router = s3server.S3Application(FLAGS.buckets_path)
server = wsgi.Server()
server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
server = wsgi.Server("S3 Objectstore",
router,
port=FLAGS.s3_port,
host=FLAGS.s3_host)
server.start()
server.wait()
+20 -2
View File
@@ -63,6 +63,19 @@ flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
def handle_flash_socket_policy(socket):
"""Answer one Flash socket-policy request on the given connection.

Reads the client's request from the socket; if it contains the
standard ``<policy-file-request/>`` command, writes back a
cross-domain policy allowing any domain to reach the VNC proxy
port, then closes the connection either way.
"""
LOG.info(_("Received connection on flash socket policy port"))
fd = socket.makefile('rw')
expected_command = "<policy-file-request/>"
# Read just enough bytes to contain the expected command (+1 for the
# NUL terminator Flash appends) rather than blocking for more input.
if expected_command in fd.read(len(expected_command) + 1):
LOG.info(_("Received valid flash socket policy request"))
# Grant all domains access to the VNC proxy port only.
fd.write('<?xml version="1.0"?><cross-domain-policy><allow-'
'access-from domain="*" to-ports="%d" /></cross-'
'domain-policy>' % (FLAGS.vncproxy_port))
fd.flush()
# Close unconditionally: the policy exchange is one-shot.
socket.close()
if __name__ == "__main__":
utils.default_flagfile()
FLAGS(sys.argv)
@@ -96,6 +109,11 @@ if __name__ == "__main__":
service.serve()
server = wsgi.Server()
server.start(with_auth, FLAGS.vncproxy_port, host=FLAGS.vncproxy_host)
server = wsgi.Server("VNC Proxy",
with_auth,
host=FLAGS.vncproxy_host,
port=FLAGS.vncproxy_port)
server.start()
server.start_tcp(handle_flash_socket_policy, 843, host=FLAGS.vncproxy_host)
server.wait()
+13 -7
View File
@@ -17,7 +17,7 @@ if [ ! -n "$HOST_IP" ]; then
HOST_IP=`LC_ALL=C ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
fi
USE_MYSQL=${USE_MYSQL:-0}
USE_MYSQL=${USE_MYSQL:-1}
INTERFACE=${INTERFACE:-eth0}
FLOATING_RANGE=${FLOATING_RANGE:-10.6.0.0/27}
FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
@@ -159,10 +159,6 @@ NOVA_CONF_EOF
mkdir -p $NOVA_DIR/instances
rm -rf $NOVA_DIR/networks
mkdir -p $NOVA_DIR/networks
if [ ! -d "$NOVA_DIR/images" ]; then
ln -s $DIR/images $NOVA_DIR/images
fi
if [ "$TEST" == 1 ]; then
cd $NOVA_DIR
python $NOVA_DIR/run_tests.py
@@ -181,8 +177,18 @@ NOVA_CONF_EOF
# create some floating ips
$NOVA_DIR/bin/nova-manage floating create `hostname` $FLOATING_RANGE
# convert old images
$NOVA_DIR/bin/nova-manage image convert $DIR/images
if [ ! -d "$NOVA_DIR/images" ]; then
if [ ! -d "$DIR/converted-images" ]; then
# convert old images
mkdir $DIR/converted-images
ln -s $DIR/converted-images $NOVA_DIR/images
$NOVA_DIR/bin/nova-manage image convert $DIR/images
else
ln -s $DIR/converted-images $NOVA_DIR/images
fi
fi
# nova api crashes if we start it with a regular screen command,
# so send the start command by forcing text into the window.
-4
View File
@@ -1,4 +0,0 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 2a2fe6198f4be4a4d6f289b09d16d74a
tags: fbb0d17656682115ca4d033fb2f83ba1
+39
View File
@@ -0,0 +1,39 @@
MultiNic
========
What is it
----------
Multinic allows an instance to have more than one vif connected to it. Each vif is representative of a separate network with its own IP block.
Managers
--------
Each of the network managers are designed to run independently of the compute manager. They expose a common API for the compute manager to call to determine and configure the network(s) for an instance. Direct calls to either the network api or especially the DB should be avoided by the virt layers.
On startup a manager looks in the networks table for networks it is assigned and configures itself to support that network. Using the periodic task, they will claim new networks that have no host set. Only one network per network-host will be claimed at a time. This allows for pseudo-load-balancing if there are multiple network-hosts running.
Flat Manager
------------
.. image:: /images/multinic_flat.png
The Flat manager is most similar to a traditional switched network environment. It assumes that the IP routing, DNS, DHCP (possibly) and bridge creation is handled by something else. That is it makes no attempt to configure any of this. It does keep track of a range of IPs for the instances that are connected to the network to be allocated.
Each instance will get a fixed IP from each network's pool. The guest operating system may be configured to gather this information through an agent or by the hypervisor injecting the files, or it may ignore it completely and come up with only a layer 2 connection.
Flat manager requires at least one nova-network process running that will listen to the API queue and respond to queries. It does not need to sit on any of the networks but it does keep track of the IPs it hands out to instances.
FlatDHCP Manager
----------------
.. image:: /images/multinic_dhcp.png
FlatDHCP manager builds on the Flat manager adding dnsmasq (DNS and DHCP) and radvd (Router Advertisement) servers on the bridge for that network. The services run on the host that is assigned to that network. The FlatDHCP manager will create its bridge as specified when the network was created on the network-host when the network host starts up or when a new network gets allocated to that host. Compute nodes will also create the bridges as necessary and connect instance VIFs to them.
VLAN Manager
------------
.. image:: /images/multinic_vlan.png
The VLAN manager sets up forwarding to/from a cloudpipe instance in addition to providing dnsmasq (DNS and DHCP) and radvd (Router Advertisement) services for each network. The manager will create its bridge as specified when the network was created on the network-host when the network host starts up or when a new network gets allocated to that host. Compute nodes will also create the bridges as necessary and connect instance VIFs to them.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.

After

Width:  |  Height:  |  Size: 53 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 57 KiB

+5
View File
@@ -30,3 +30,8 @@
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
import gettext
gettext.install("nova", unicode=1)
+20
View File
@@ -369,3 +369,23 @@ class AdminController(object):
raise exception.ApiError(_('Duplicate rule'))
self.compute_api.trigger_provider_fw_rules_refresh(context)
return {'status': 'OK', 'message': 'Added %s rules' % rules_added}
def describe_external_address_blocks(self, context):
blocks = db.provider_fw_rule_get_all(context)
# NOTE(todd): use a set since we have icmp/udp/tcp rules with same cidr
blocks = set([b.cidr for b in blocks])
blocks = [{'cidr': b} for b in blocks]
return {'externalIpBlockInfo':
list(sorted(blocks, key=lambda k: k['cidr']))}
def remove_external_address_block(self, context, cidr):
LOG.audit(_('Removing ip block from %s'), cidr, context=context)
cidr = urllib.unquote(cidr).decode()
# raise if invalid
netaddr.IPNetwork(cidr)
rules = db.provider_fw_rule_get_all_by_cidr(context, cidr)
for rule in rules:
db.provider_fw_rule_destroy(context, rule['id'])
if rules:
self.compute_api.trigger_provider_fw_rules_refresh(context)
return {'status': 'OK', 'message': 'Deleted %s rules' % len(rules)}
+92 -42
View File
@@ -23,7 +23,7 @@ datastore.
"""
import base64
import IPy
import netaddr
import os
import urllib
import tempfile
@@ -86,8 +86,7 @@ class CloudController(object):
self.volume_api = volume.API()
self.compute_api = compute.API(
network_api=self.network_api,
volume_api=self.volume_api,
hostname_factory=ec2utils.id_to_ec2_id)
volume_api=self.volume_api)
self.setup()
def __str__(self):
@@ -121,8 +120,8 @@ class CloudController(object):
result = {}
for instance in self.compute_api.get_all(context,
project_id=project_id):
if instance['fixed_ip']:
line = '%s slots=%d' % (instance['fixed_ip']['address'],
if instance['fixed_ips']:
line = '%s slots=%d' % (instance['fixed_ips'][0]['address'],
instance['vcpus'])
key = str(instance['key_name'])
if key in result:
@@ -152,7 +151,7 @@ class CloudController(object):
# This ensures that all attributes of the instance
# are populated.
instance_ref = db.instance_get(ctxt, instance_ref['id'])
instance_ref = db.instance_get(ctxt, instance_ref[0]['id'])
mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
if instance_ref['key_name']:
@@ -167,6 +166,9 @@ class CloudController(object):
instance_ref['id'])
ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
image_ec2_id = self.image_ec2_id(instance_ref['image_ref'])
security_groups = db.security_group_get_by_instance(ctxt,
instance_ref['id'])
security_groups = [x['name'] for x in security_groups]
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
@@ -190,7 +192,7 @@ class CloudController(object):
'public-ipv4': floating_ip or '',
'public-keys': keys,
'reservation-id': instance_ref['reservation_id'],
'security-groups': '',
'security-groups': security_groups,
'mpi': mpi}}
for image_type in ['kernel', 'ramdisk']:
@@ -391,15 +393,21 @@ class CloudController(object):
pass
return True
def describe_security_groups(self, context, group_name=None, **kwargs):
def describe_security_groups(self, context, group_name=None, group_id=None,
**kwargs):
self.compute_api.ensure_default_security_group(context)
if group_name:
if group_name or group_id:
groups = []
for name in group_name:
group = db.security_group_get_by_name(context,
context.project_id,
name)
groups.append(group)
if group_name:
for name in group_name:
group = db.security_group_get_by_name(context,
context.project_id,
name)
groups.append(group)
if group_id:
for gid in group_id:
group = db.security_group_get(context, gid)
groups.append(group)
elif context.is_admin:
groups = db.security_group_get_all(context)
else:
@@ -452,7 +460,7 @@ class CloudController(object):
elif cidr_ip:
# If this fails, it throws an exception. This is what we want.
cidr_ip = urllib.unquote(cidr_ip).decode()
IPy.IP(cidr_ip)
netaddr.IPNetwork(cidr_ip)
values['cidr'] = cidr_ip
else:
values['cidr'] = '0.0.0.0/0'
@@ -497,13 +505,26 @@ class CloudController(object):
return True
return False
def revoke_security_group_ingress(self, context, group_name, **kwargs):
LOG.audit(_("Revoke security group ingress %s"), group_name,
context=context)
def revoke_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
if not group_name and not group_id:
err = "Not enough parameters, need group_name or group_id"
raise exception.ApiError(_(err))
self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
notfound = exception.SecurityGroupNotFound
if group_name:
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
if not security_group:
raise notfound(security_group_id=group_name)
if group_id:
security_group = db.security_group_get(context, group_id)
if not security_group:
raise notfound(security_group_id=group_id)
msg = "Revoke security group ingress %s"
LOG.audit(_(msg), security_group['name'], context=context)
criteria = self._revoke_rule_args_to_dict(context, **kwargs)
if criteria is None:
@@ -518,7 +539,7 @@ class CloudController(object):
if match:
db.security_group_rule_destroy(context, rule['id'])
self.compute_api.trigger_security_group_rules_refresh(context,
security_group['id'])
security_group_id=security_group['id'])
return True
raise exception.ApiError(_("No rule for the specified parameters."))
@@ -526,14 +547,26 @@ class CloudController(object):
# Unfortunately, it seems Boto is using an old API
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name, **kwargs):
LOG.audit(_("Authorize security group ingress %s"), group_name,
context=context)
def authorize_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
if not group_name and not group_id:
err = "Not enough parameters, need group_name or group_id"
raise exception.ApiError(_(err))
self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
notfound = exception.SecurityGroupNotFound
if group_name:
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
if not security_group:
raise notfound(security_group_id=group_name)
if group_id:
security_group = db.security_group_get(context, group_id)
if not security_group:
raise notfound(security_group_id=group_id)
msg = "Authorize security group ingress %s"
LOG.audit(_(msg), security_group['name'], context=context)
values = self._revoke_rule_args_to_dict(context, **kwargs)
if values is None:
raise exception.ApiError(_("Not enough parameters to build a "
@@ -547,7 +580,7 @@ class CloudController(object):
security_group_rule = db.security_group_rule_create(context, values)
self.compute_api.trigger_security_group_rules_refresh(context,
security_group['id'])
security_group_id=security_group['id'])
return True
@@ -583,11 +616,23 @@ class CloudController(object):
return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
def delete_security_group(self, context, group_name, **kwargs):
def delete_security_group(self, context, group_name=None, group_id=None,
**kwargs):
if not group_name and not group_id:
err = "Not enough parameters, need group_name or group_id"
raise exception.ApiError(_(err))
notfound = exception.SecurityGroupNotFound
if group_name:
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
if not security_group:
raise notfound(security_group_id=group_name)
elif group_id:
security_group = db.security_group_get(context, group_id)
if not security_group:
raise notfound(security_group_id=group_id)
LOG.audit(_("Delete security group %s"), group_name, context=context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
db.security_group_destroy(context, security_group.id)
return True
@@ -793,15 +838,15 @@ class CloudController(object):
'name': instance['state_description']}
fixed_addr = None
floating_addr = None
if instance['fixed_ip']:
fixed_addr = instance['fixed_ip']['address']
if instance['fixed_ip']['floating_ips']:
fixed = instance['fixed_ip']
if instance['fixed_ips']:
fixed = instance['fixed_ips'][0]
fixed_addr = fixed['address']
if fixed['floating_ips']:
floating_addr = fixed['floating_ips'][0]['address']
if instance['fixed_ip']['network'] and 'use_v6' in kwargs:
if fixed['network'] and 'use_v6' in kwargs:
i['dnsNameV6'] = ipv6.to_global(
instance['fixed_ip']['network']['cidr_v6'],
instance['mac_address'],
fixed['network']['cidr_v6'],
fixed['virtual_interface']['address'],
instance['project_id'])
i['privateDnsName'] = fixed_addr
@@ -877,7 +922,8 @@ class CloudController(object):
public_ip = self.network_api.allocate_floating_ip(context)
return {'publicIp': public_ip}
except rpc.RemoteError as ex:
if ex.exc_type == 'NoMoreAddresses':
# NOTE(tr3buchet) - why does this block exist?
if ex.exc_type == 'NoMoreFloatingIps':
raise exception.NoMoreFloatingIps()
else:
raise
@@ -1045,12 +1091,16 @@ class CloudController(object):
def _get_image(self, context, ec2_id):
try:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
return self.image_service.show(context, internal_id)
image = self.image_service.show(context, internal_id)
except (exception.InvalidEc2Id, exception.ImageNotFound):
try:
return self.image_service.show_by_name(context, ec2_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=ec2_id)
image_type = ec2_id.split('-')[0]
if self._image_type(image.get('container_format')) != image_type:
raise exception.ImageNotFound(image_id=ec2_id)
return image
def _format_image(self, image):
"""Convert from format defined by BaseImageService to S3 format."""
+4 -2
View File
@@ -35,6 +35,9 @@ FLAGS = flags.FLAGS
class MetadataRequestHandler(wsgi.Application):
"""Serve metadata from the EC2 API."""
def __init__(self):
self.cc = cloud.CloudController()
def print_data(self, data):
if isinstance(data, dict):
output = ''
@@ -68,12 +71,11 @@ class MetadataRequestHandler(wsgi.Application):
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
cc = cloud.CloudController()
remote_address = req.remote_addr
if FLAGS.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
try:
meta_data = cc.get_metadata(remote_address)
meta_data = self.cc.get_metadata(remote_address)
except Exception:
LOG.exception(_('Failed to get metadata for ip: %s'),
remote_address)
+3 -3
View File
@@ -87,8 +87,8 @@ def create_resource():
},
}
serializers = {
body_serializers = {
'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
}
return wsgi.Resource(Controller(), serializers=serializers)
serializer = wsgi.ResponseSerializer(body_serializers)
return wsgi.Resource(Controller(), serializer=serializer)
+7 -6
View File
@@ -34,20 +34,20 @@ class Controller(object):
def __init__(self):
pass
def index(self, req, server_id):
def index(self, req, server_id, **kwargs):
""" Returns the list of backup schedules for a given instance """
return faults.Fault(exc.HTTPNotImplemented())
def show(self, req, server_id, id):
def show(self, req, server_id, id, **kwargs):
""" Returns a single backup schedule for a given instance """
return faults.Fault(exc.HTTPNotImplemented())
def create(self, req, server_id, body):
def create(self, req, server_id, **kwargs):
""" No actual update method required, since the existing API allows
both create and update through a POST """
return faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, server_id, id):
def delete(self, req, server_id, id, **kwargs):
""" Deletes an existing backup schedule """
return faults.Fault(exc.HTTPNotImplemented())
@@ -59,9 +59,10 @@ def create_resource():
},
}
serializers = {
body_serializers = {
'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10,
metadata=metadata),
}
return wsgi.Resource(Controller(), serializers=serializers)
serializer = wsgi.ResponseSerializer(body_serializers)
return wsgi.Resource(Controller(), serializer=serializer)
+26 -19
View File
@@ -45,23 +45,20 @@ def get_pagination_params(request):
exc.HTTPBadRequest() exceptions to be raised.
"""
try:
marker = int(request.GET.get('marker', 0))
except ValueError:
raise webob.exc.HTTPBadRequest(_('marker param must be an integer'))
params = {}
for param in ['marker', 'limit']:
if not param in request.GET:
continue
try:
params[param] = int(request.GET[param])
except ValueError:
msg = _('%s param must be an integer') % param
raise webob.exc.HTTPBadRequest(msg)
if params[param] < 0:
msg = _('%s param must be positive') % param
raise webob.exc.HTTPBadRequest(msg)
try:
limit = int(request.GET.get('limit', 0))
except ValueError:
raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
if limit < 0:
raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
if marker < 0:
raise webob.exc.HTTPBadRequest(_('marker param must be positive'))
return(marker, limit)
return params
def limited(items, request, max_limit=FLAGS.osapi_max_limit):
@@ -100,10 +97,10 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit):
def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
"""Return a slice of items according to the requested marker and limit."""
(marker, limit) = get_pagination_params(request)
params = get_pagination_params(request)
if limit == 0:
limit = max_limit
limit = params.get('limit', max_limit)
marker = params.get('marker')
limit = min(max_limit, limit)
start_index = 0
@@ -137,3 +134,13 @@ def get_id_from_href(href):
except:
LOG.debug(_("Error extracting id from href: %s") % href)
raise webob.exc.HTTPBadRequest(_('could not parse id from href'))
def remove_version_from_href(base_url):
"""Removes the api version from the href.
Given: 'http://www.nova.com/v1.1/123'
Returns: 'http://www.nova.com/123'
"""
return base_url.rsplit('/', 1).pop(0)
+1 -11
View File
@@ -90,14 +90,4 @@ class Controller(object):
def create_resource():
metadata = {
'attributes': {
'console': [],
},
}
serializers = {
'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
}
return wsgi.Resource(Controller(), serializers=serializers)
return wsgi.Resource(Controller())
@@ -0,0 +1,126 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" The instance type extra specs extension"""
from webob import exc
from nova import db
from nova import quota
from nova.api.openstack import extensions
from nova.api.openstack import faults
from nova.api.openstack import wsgi
class FlavorExtraSpecsController(object):
""" The flavor extra specs API controller for the Openstack API """
def _get_extra_specs(self, context, flavor_id):
extra_specs = db.api.instance_type_extra_specs_get(context, flavor_id)
specs_dict = {}
for key, value in extra_specs.iteritems():
specs_dict[key] = value
return dict(extra_specs=specs_dict)
def _check_body(self, body):
if body == None or body == "":
expl = _('No Request Body')
raise exc.HTTPBadRequest(explanation=expl)
def index(self, req, flavor_id):
""" Returns the list of extra specs for a givenflavor """
context = req.environ['nova.context']
return self._get_extra_specs(context, flavor_id)
def create(self, req, flavor_id, body):
self._check_body(body)
context = req.environ['nova.context']
specs = body.get('extra_specs')
try:
db.api.instance_type_extra_specs_update_or_create(context,
flavor_id,
specs)
except quota.QuotaError as error:
self._handle_quota_error(error)
return body
def update(self, req, flavor_id, id, body):
self._check_body(body)
context = req.environ['nova.context']
if not id in body:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
if len(body) > 1:
expl = _('Request body contains too many items')
raise exc.HTTPBadRequest(explanation=expl)
try:
db.api.instance_type_extra_specs_update_or_create(context,
flavor_id,
body)
except quota.QuotaError as error:
self._handle_quota_error(error)
return body
def show(self, req, flavor_id, id):
""" Return a single extra spec item """
context = req.environ['nova.context']
specs = self._get_extra_specs(context, flavor_id)
if id in specs['extra_specs']:
return {id: specs['extra_specs'][id]}
else:
return faults.Fault(exc.HTTPNotFound())
def delete(self, req, flavor_id, id):
""" Deletes an existing extra spec """
context = req.environ['nova.context']
db.api.instance_type_extra_specs_delete(context, flavor_id, id)
def _handle_quota_error(self, error):
"""Reraise quota errors as api-specific http exceptions."""
if error.code == "MetadataLimitExceeded":
raise exc.HTTPBadRequest(explanation=error.message)
raise error
class Flavorextraspecs(extensions.ExtensionDescriptor):
def get_name(self):
return "FlavorExtraSpecs"
def get_alias(self):
return "os-flavor-extra-specs"
def get_description(self):
return "Instance type (flavor) extra specs"
def get_namespace(self):
return \
"http://docs.openstack.org/ext/flavor_extra_specs/api/v1.1"
def get_updated(self):
return "2011-06-23T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
'os-extra_specs',
FlavorExtraSpecsController(),
parent=dict(member_name='flavor', collection_name='flavors'))
resources.append(res)
return resources
+173
View File
@@ -0,0 +1,173 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
from webob import exc
from nova import exception
from nova import network
from nova import rpc
from nova.api.openstack import faults
from nova.api.openstack import extensions
def _translate_floating_ip_view(floating_ip):
result = {'id': floating_ip['id'],
'ip': floating_ip['address']}
if 'fixed_ip' in floating_ip:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
else:
result['fixed_ip'] = None
if 'instance' in floating_ip:
result['instance_id'] = floating_ip['instance']['id']
else:
result['instance_id'] = None
return {'floating_ip': result}
def _translate_floating_ips_view(floating_ips):
return {'floating_ips': [_translate_floating_ip_view(floating_ip)
for floating_ip in floating_ips]}
class FloatingIPController(object):
"""The Floating IPs API controller for the OpenStack API."""
_serialization_metadata = {
'application/xml': {
"attributes": {
"floating_ip": [
"id",
"ip",
"instance_id",
"fixed_ip",
]}}}
def __init__(self):
self.network_api = network.API()
super(FloatingIPController, self).__init__()
def show(self, req, id):
"""Return data about the given floating ip."""
context = req.environ['nova.context']
try:
floating_ip = self.network_api.get_floating_ip(context, id)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return _translate_floating_ip_view(floating_ip)
def index(self, req):
context = req.environ['nova.context']
floating_ips = self.network_api.list_floating_ips(context)
return _translate_floating_ips_view(floating_ips)
def create(self, req):
context = req.environ['nova.context']
try:
address = self.network_api.allocate_floating_ip(context)
ip = self.network_api.get_floating_ip_by_ip(context, address)
except rpc.RemoteError as ex:
# NOTE(tr3buchet) - why does this block exist?
if ex.exc_type == 'NoMoreFloatingIps':
raise exception.NoMoreFloatingIps()
else:
raise
return {'allocated': {
"id": ip['id'],
"floating_ip": ip['address']}}
def delete(self, req, id):
context = req.environ['nova.context']
ip = self.network_api.get_floating_ip(context, id)
self.network_api.release_floating_ip(context, address=ip)
return {'released': {
"id": ip['id'],
"floating_ip": ip['address']}}
def associate(self, req, id, body):
""" /floating_ips/{id}/associate fixed ip in body """
context = req.environ['nova.context']
floating_ip = self._get_ip_by_id(context, id)
fixed_ip = body['associate_address']['fixed_ip']
try:
self.network_api.associate_floating_ip(context,
floating_ip, fixed_ip)
except rpc.RemoteError:
raise
return {'associated':
{
"floating_ip_id": id,
"floating_ip": floating_ip,
"fixed_ip": fixed_ip}}
def disassociate(self, req, id):
""" POST /floating_ips/{id}/disassociate """
context = req.environ['nova.context']
floating_ip = self.network_api.get_floating_ip(context, id)
address = floating_ip['address']
fixed_ip = floating_ip['fixed_ip']['address']
try:
self.network_api.disassociate_floating_ip(context, address)
except rpc.RemoteError:
raise
return {'disassociated': {'floating_ip': address,
'fixed_ip': fixed_ip}}
def _get_ip_by_id(self, context, value):
"""Checks that value is id and then returns its address."""
return self.network_api.get_floating_ip(context, value)['address']
class Floating_ips(extensions.ExtensionDescriptor):
def get_name(self):
return "Floating_ips"
def get_alias(self):
return "os-floating-ips"
def get_description(self):
return "Floating IPs support"
def get_namespace(self):
return "http://docs.openstack.org/ext/floating_ips/api/v1.1"
def get_updated(self):
return "2011-06-16T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ips',
FloatingIPController(),
member_actions={
'associate': 'POST',
'disassociate': 'POST'})
resources.append(res)
return resources
+114
View File
@@ -0,0 +1,114 @@
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
import webob.exc
from nova import compute
from nova import exception
from nova import flags
from nova import log as logging
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import faults
from nova.scheduler import api as scheduler_api
LOG = logging.getLogger("nova.api.hosts")
FLAGS = flags.FLAGS
def _list_hosts(req, service=None):
"""Returns a summary list of hosts, optionally filtering
by service type.
"""
context = req.environ['nova.context']
hosts = scheduler_api.get_host_list(context)
if service:
hosts = [host for host in hosts
if host["service"] == service]
return hosts
def check_host(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, service=None, *args, **kwargs):
listed_hosts = _list_hosts(req, service)
hosts = [h["host_name"] for h in listed_hosts]
if id in hosts:
return fn(self, req, id, *args, **kwargs)
else:
raise exception.HostNotFound(host=id)
return wrapped
class HostController(object):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
super(HostController, self).__init__()
def index(self, req):
return {'hosts': _list_hosts(req)}
@check_host
def update(self, req, id, body):
for raw_key, raw_val in body.iteritems():
key = raw_key.lower().strip()
val = raw_val.lower().strip()
# NOTE: (dabo) Right now only 'status' can be set, but other
# actions may follow.
if key == "status":
if val[:6] in ("enable", "disabl"):
return self._set_enabled_status(req, id,
enabled=(val.startswith("enable")))
else:
explanation = _("Invalid status: '%s'") % raw_val
raise webob.exc.HTTPBadRequest(explanation=explanation)
else:
explanation = _("Invalid update setting: '%s'") % raw_key
raise webob.exc.HTTPBadRequest(explanation=explanation)
def _set_enabled_status(self, req, host, enabled):
"""Sets the specified host's ability to accept new instances."""
context = req.environ['nova.context']
state = "enabled" if enabled else "disabled"
LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
result = self.compute_api.set_host_enabled(context, host=host,
enabled=enabled)
return {"host": host, "status": result}
class Hosts(extensions.ExtensionDescriptor):
def get_name(self):
return "Hosts"
def get_alias(self):
return "os-hosts"
def get_description(self):
return "Host administration"
def get_namespace(self):
return "http://docs.openstack.org/ext/hosts/api/v1.1"
def get_updated(self):
return "2011-06-29T00:00:00+00:00"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts', HostController(),
collection_actions={'update': 'PUT'}, member_actions={})]
return resources
+125
View File
@@ -0,0 +1,125 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The multinic extension."""
from webob import exc
from nova import compute
from nova import log as logging
from nova.api.openstack import extensions
from nova.api.openstack import faults
LOG = logging.getLogger("nova.api.multinic")
# Note: The class name is as it has to be for this to be loaded as an
# extension--only first character capitalized.
class Multinic(extensions.ExtensionDescriptor):
"""The multinic extension.
Exposes addFixedIp and removeFixedIp actions on servers.
"""
def __init__(self, *args, **kwargs):
"""Initialize the extension.
Gets a compute.API object so we can call the back-end
add_fixed_ip() and remove_fixed_ip() methods.
"""
super(Multinic, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def get_name(self):
"""Return the extension name, as required by contract."""
return "Multinic"
def get_alias(self):
"""Return the extension alias, as required by contract."""
return "NMN"
def get_description(self):
"""Return the extension description, as required by contract."""
return "Multiple network support"
def get_namespace(self):
"""Return the namespace, as required by contract."""
return "http://docs.openstack.org/ext/multinic/api/v1.1"
def get_updated(self):
"""Return the last updated timestamp, as required by contract."""
return "2011-06-09T00:00:00+00:00"
def get_actions(self):
"""Return the actions the extension adds, as required by contract."""
actions = []
# Add the add_fixed_ip action
act = extensions.ActionExtension("servers", "addFixedIp",
self._add_fixed_ip)
actions.append(act)
# Add the remove_fixed_ip action
act = extensions.ActionExtension("servers", "removeFixedIp",
self._remove_fixed_ip)
actions.append(act)
return actions
def _add_fixed_ip(self, input_dict, req, id):
"""Adds an IP on a given network to an instance."""
try:
# Validate the input entity
if 'networkId' not in input_dict['addFixedIp']:
LOG.exception(_("Missing 'networkId' argument for addFixedIp"))
return faults.Fault(exc.HTTPUnprocessableEntity())
# Add the fixed IP
network_id = input_dict['addFixedIp']['networkId']
self.compute_api.add_fixed_ip(req.environ['nova.context'], id,
network_id)
except Exception, e:
LOG.exception(_("Error in addFixedIp %s"), e)
return faults.Fault(exc.HTTPBadRequest())
return exc.HTTPAccepted()
def _remove_fixed_ip(self, input_dict, req, id):
"""Removes an IP from an instance."""
try:
# Validate the input entity
if 'address' not in input_dict['removeFixedIp']:
LOG.exception(_("Missing 'address' argument for "
"removeFixedIp"))
return faults.Fault(exc.HTTPUnprocessableEntity())
# Remove the fixed IP
address = input_dict['removeFixedIp']['address']
self.compute_api.remove_fixed_ip(req.environ['nova.context'], id,
address)
except Exception, e:
LOG.exception(_("Error in removeFixedIp %s"), e)
return faults.Fault(exc.HTTPBadRequest())
return exc.HTTPAccepted()
+13 -2
View File
@@ -114,6 +114,15 @@ class CreateInstanceHelper(object):
name = name.strip()
reservation_id = body['server'].get('reservation_id')
min_count = body['server'].get('min_count')
max_count = body['server'].get('max_count')
# min_count and max_count are optional. If they exist, they come
# in as strings. We want to default 'min_count' to 1, and default
# 'max_count' to be 'min_count'.
min_count = int(min_count) if min_count else 1
max_count = int(max_count) if max_count else min_count
if min_count > max_count:
min_count = max_count
try:
inst_type = \
@@ -137,7 +146,9 @@ class CreateInstanceHelper(object):
injected_files=injected_files,
admin_password=password,
zone_blob=zone_blob,
reservation_id=reservation_id))
reservation_id=reservation_id,
min_count=min_count,
max_count=max_count))
except quota.QuotaError as error:
self._handle_quota_error(error)
except exception.ImageNotFound as error:
@@ -278,7 +289,7 @@ class ServerXMLDeserializer(wsgi.XMLDeserializer):
"""Deserialize an xml-formatted server create request"""
dom = minidom.parseString(string)
server = self._extract_server(dom)
return {'server': server}
return {'body': {'server': server}}
def _extract_server(self, node):
"""Marshal the server attribute of a parsed request"""
+4 -2
View File
@@ -85,8 +85,10 @@ def create_resource(version='1.0'):
'1.1': wsgi.XMLNS_V11,
}[version]
serializers = {
body_serializers = {
'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns),
}
return wsgi.Resource(controller, serializers=serializers)
serializer = wsgi.ResponseSerializer(body_serializers)
return wsgi.Resource(controller, serializer=serializer)
+13 -10
View File
@@ -112,18 +112,18 @@ class Controller(object):
class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
def __init__(self):
xmlns = wsgi.XMLNS_V11
def __init__(self, xmlns=wsgi.XMLNS_V11):
super(ImageMetadataXMLSerializer, self).__init__(xmlns=xmlns)
def _meta_item_to_xml(self, doc, key, value):
node = doc.createElement('meta')
node.setAttribute('key', key)
text = doc.createTextNode(value)
doc.appendChild(node)
node.setAttribute('key', '%s' % key)
text = doc.createTextNode('%s' % value)
node.appendChild(text)
return node
def _meta_list_to_xml(self, xml_doc, meta_items):
def meta_list_to_xml(self, xml_doc, meta_items):
container_node = xml_doc.createElement('metadata')
for (key, value) in meta_items:
item_node = self._meta_item_to_xml(xml_doc, key, value)
@@ -133,9 +133,10 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
def _meta_list_to_xml_string(self, metadata_dict):
xml_doc = minidom.Document()
items = metadata_dict['metadata'].items()
container_node = self._meta_list_to_xml(xml_doc, items)
container_node = self.meta_list_to_xml(xml_doc, items)
xml_doc.appendChild(container_node)
self._add_xmlns(container_node)
return container_node.toprettyxml(indent=' ')
return xml_doc.toprettyxml(indent=' ', encoding='UTF-8')
def index(self, metadata_dict):
return self._meta_list_to_xml_string(metadata_dict)
@@ -147,8 +148,9 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
xml_doc = minidom.Document()
item_key, item_value = meta_item_dict.items()[0]
item_node = self._meta_item_to_xml(xml_doc, item_key, item_value)
xml_doc.appendChild(item_node)
self._add_xmlns(item_node)
return item_node.toprettyxml(indent=' ')
return xml_doc.toprettyxml(indent=' ', encoding='UTF-8')
def show(self, meta_item_dict):
return self._meta_item_to_xml_string(meta_item_dict['meta'])
@@ -158,8 +160,9 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
def create_resource():
serializers = {
body_serializers = {
'application/xml': ImageMetadataXMLSerializer(),
}
serializer = wsgi.ResponseSerializer(body_serializers)
return wsgi.Resource(Controller(), serializers=serializers)
return wsgi.Resource(Controller(), serializer=serializer)
+135 -29
View File
@@ -13,18 +13,20 @@
# License for the specific language governing permissions and limitations
# under the License.
import urlparse
import os.path
import webob.exc
from xml.dom import minidom
from nova import compute
from nova import exception
from nova import flags
import nova.image
from nova import log
from nova import utils
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.api.openstack import image_metadata
from nova.api.openstack.views import images as images_view
from nova.api.openstack import wsgi
@@ -90,31 +92,67 @@ class Controller(object):
return webob.exc.HTTPNoContent()
def create(self, req, body):
"""Snapshot a server instance and save the image.
"""Snapshot or backup a server instance and save the image.
Images now have an `image_type` associated with them, which can be
'snapshot' or the backup type, like 'daily' or 'weekly'.
If the image_type is backup-like, then the rotation factor can be
included and that will cause the oldest backups that exceed the
rotation factor to be deleted.
:param req: `wsgi.Request` object
"""
def get_param(param):
try:
return body["image"][param]
except KeyError:
raise webob.exc.HTTPBadRequest(explanation="Missing required "
"param: %s" % param)
context = req.environ['nova.context']
content_type = req.get_content_type()
if not body:
raise webob.exc.HTTPBadRequest()
image_type = body["image"].get("image_type", "snapshot")
try:
server_id = self._server_id_from_req(req, body)
image_name = body["image"]["name"]
except KeyError:
raise webob.exc.HTTPBadRequest()
image_name = get_param("name")
props = self._get_extra_properties(req, body)
image = self._compute_service.snapshot(context, server_id,
image_name, props)
if image_type == "snapshot":
image = self._compute_service.snapshot(
context, server_id, image_name,
extra_properties=props)
elif image_type == "backup":
# NOTE(sirp): Unlike snapshot, backup is not a customer facing
# API call; rather, it's used by the internal backup scheduler
if not FLAGS.allow_admin_api:
raise webob.exc.HTTPBadRequest(
explanation="Admin API Required")
backup_type = get_param("backup_type")
rotation = int(get_param("rotation"))
image = self._compute_service.backup(
context, server_id, image_name,
backup_type, rotation, extra_properties=props)
else:
LOG.error(_("Invalid image_type '%s' passed") % image_type)
raise webob.exc.HTTPBadRequest(explanation="Invalue image_type: "
"%s" % image_type)
return dict(image=self.get_builder(req).build(image, detail=True))
def get_builder(self, request):
"""Indicates that you must use a Controller subclass."""
raise NotImplementedError
raise NotImplementedError()
def _server_id_from_req(self, req, data):
raise NotImplementedError()
@@ -181,9 +219,9 @@ class ControllerV11(Controller):
"""
context = req.environ['nova.context']
filters = self._get_filters(req)
(marker, limit) = common.get_pagination_params(req)
images = self._image_service.index(
context, filters=filters, marker=marker, limit=limit)
page_params = common.get_pagination_params(req)
images = self._image_service.index(context, filters=filters,
**page_params)
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=False) for image in images])
@@ -195,9 +233,9 @@ class ControllerV11(Controller):
"""
context = req.environ['nova.context']
filters = self._get_filters(req)
(marker, limit) = common.get_pagination_params(req)
images = self._image_service.detail(
context, filters=filters, marker=marker, limit=limit)
page_params = common.get_pagination_params(req)
images = self._image_service.detail(context, filters=filters,
**page_params)
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
@@ -208,13 +246,23 @@ class ControllerV11(Controller):
msg = _("Expected serverRef attribute on server entity.")
raise webob.exc.HTTPBadRequest(explanation=msg)
head, tail = os.path.split(server_ref)
if not server_ref.startswith('http'):
return server_ref
if head and head != os.path.join(req.application_url, 'servers'):
passed = urlparse.urlparse(server_ref)
expected = urlparse.urlparse(req.application_url)
version = expected.path.split('/')[1]
expected_prefix = "/%s/servers/" % version
_empty, _sep, server_id = passed.path.partition(expected_prefix)
scheme_ok = passed.scheme == expected.scheme
host_ok = passed.hostname == expected.hostname
port_ok = (passed.port == expected.port or
passed.port == FLAGS.osapi_port)
if not (scheme_ok and port_ok and host_ok and server_id):
msg = _("serverRef must match request url")
raise webob.exc.HTTPBadRequest(explanation=msg)
return tail
return server_id
def _get_extra_properties(self, req, data):
server_ref = data['image']['serverRef']
@@ -224,16 +272,7 @@ class ControllerV11(Controller):
return {'instance_ref': server_ref}
def create_resource(version='1.0'):
controller = {
'1.0': ControllerV10,
'1.1': ControllerV11,
}[version]()
xmlns = {
'1.0': wsgi.XMLNS_V10,
'1.1': wsgi.XMLNS_V11,
}[version]
class ImageXMLSerializer(wsgi.XMLDictSerializer):
metadata = {
"attributes": {
@@ -243,9 +282,76 @@ def create_resource(version='1.0'):
},
}
serializers = {
'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
metadata=metadata),
xmlns = wsgi.XMLNS_V11
def __init__(self):
self.metadata_serializer = image_metadata.ImageMetadataXMLSerializer()
def _image_to_xml(self, xml_doc, image):
try:
metadata = image.pop('metadata').items()
except Exception:
LOG.debug(_("Image object missing metadata attribute"))
metadata = {}
node = self._to_xml_node(xml_doc, self.metadata, 'image', image)
metadata_node = self.metadata_serializer.meta_list_to_xml(xml_doc,
metadata)
node.appendChild(metadata_node)
return node
def _image_list_to_xml(self, xml_doc, images):
container_node = xml_doc.createElement('images')
for image in images:
item_node = self._image_to_xml(xml_doc, image)
container_node.appendChild(item_node)
return container_node
def _image_to_xml_string(self, image):
xml_doc = minidom.Document()
item_node = self._image_to_xml(xml_doc, image)
self._add_xmlns(item_node)
return item_node.toprettyxml(indent=' ')
def _image_list_to_xml_string(self, images):
xml_doc = minidom.Document()
container_node = self._image_list_to_xml(xml_doc, images)
self._add_xmlns(container_node)
return container_node.toprettyxml(indent=' ')
def detail(self, images_dict):
return self._image_list_to_xml_string(images_dict['images'])
def show(self, image_dict):
return self._image_to_xml_string(image_dict['image'])
def create(self, image_dict):
return self._image_to_xml_string(image_dict['image'])
def create_resource(version='1.0'):
controller = {
'1.0': ControllerV10,
'1.1': ControllerV11,
}[version]()
metadata = {
"attributes": {
"image": ["id", "name", "updated", "created", "status",
"serverId", "progress", "serverRef"],
"link": ["rel", "type", "href"],
},
}
return wsgi.Resource(controller, serializers=serializers)
xml_serializer = {
'1.0': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V10),
'1.1': ImageXMLSerializer(),
}[version]
body_serializers = {
'application/xml': xml_serializer,
}
serializer = wsgi.ResponseSerializer(body_serializers)
return wsgi.Resource(controller, serializer=serializer)
+3 -2
View File
@@ -70,9 +70,10 @@ def create_resource():
},
}
serializers = {
body_serializers = {
'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
xmlns=wsgi.XMLNS_V10),
}
serializer = wsgi.ResponseSerializer(body_serializers)
return wsgi.Resource(Controller(), serializers=serializers)
return wsgi.Resource(Controller(), serializer=serializer)
+4 -2
View File
@@ -97,12 +97,14 @@ def create_resource(version='1.0'):
},
}
serializers = {
body_serializers = {
'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
metadata=metadata),
}
return wsgi.Resource(controller, serializers=serializers)
serializer = wsgi.ResponseSerializer(body_serializers)
return wsgi.Resource(controller, serializer=serializer)
class Limit(object):
+4 -2
View File
@@ -123,8 +123,10 @@ class Controller(object):
def create_resource():
serializers = {
body_serializers = {
'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11),
}
return wsgi.Resource(Controller(), serializers=serializers)
serializer = wsgi.ResponseSerializer(body_serializers)
return wsgi.Resource(Controller(), serializer=serializer)
+31 -13
View File
@@ -76,10 +76,17 @@ class Controller(object):
builder - the response model builder
"""
reservation_id = req.str_GET.get('reservation_id')
query_str = req.str_GET
reservation_id = query_str.get('reservation_id')
project_id = query_str.get('project_id')
fixed_ip = query_str.get('fixed_ip')
recurse_zones = utils.bool_from_str(query_str.get('recurse_zones'))
instance_list = self.compute_api.get_all(
req.environ['nova.context'],
reservation_id=reservation_id)
req.environ['nova.context'],
reservation_id=reservation_id,
project_id=project_id,
fixed_ip=fixed_ip,
recurse_zones=recurse_zones)
limited_list = self._limit_items(instance_list, req)
builder = self._get_view_builder(req)
servers = [builder.build(inst, is_detail)['server']
@@ -111,14 +118,15 @@ class Controller(object):
extra_values = None
result = None
try:
extra_values, result = self.helper.create_instance(
req, body, self.compute_api.create)
extra_values, instances = self.helper.create_instance(
req, body, self.compute_api.create)
except faults.Fault, f:
return f
instances = result
(inst, ) = instances
# We can only return 1 instance via the API, if we happen to
# build more than one... instances is a list, so we'll just
# use the first one..
inst = instances[0]
for key in ['instance_type', 'image_ref']:
inst[key] = extra_values[key]
@@ -168,7 +176,7 @@ class Controller(object):
'confirmResize': self._action_confirm_resize,
'revertResize': self._action_revert_resize,
'rebuild': self._action_rebuild,
}
'migrate': self._action_migrate}
for key in actions.keys():
if key in body:
@@ -212,6 +220,14 @@ class Controller(object):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
def _action_migrate(self, input_dict, req, id):
try:
self.compute_api.resize(req.environ['nova.context'], id)
except Exception, e:
LOG.exception(_("Error in migrate %s"), e)
return faults.Fault(exc.HTTPBadRequest())
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
def lock(self, req, id):
"""
@@ -608,14 +624,16 @@ def create_resource(version='1.0'):
'1.1': wsgi.XMLNS_V11,
}[version]
serializers = {
body_serializers = {
'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
xmlns=xmlns),
}
deserializers = {
body_deserializers = {
'application/xml': helper.ServerXMLDeserializer(),
}
return wsgi.Resource(controller, serializers=serializers,
deserializers=deserializers)
serializer = wsgi.ResponseSerializer(body_serializers)
deserializer = wsgi.RequestDeserializer(body_deserializers)
return wsgi.Resource(controller, deserializer, serializer)
+6 -6
View File
@@ -24,27 +24,27 @@ from nova.api.openstack import wsgi
class Controller(object):
""" The Shared IP Groups Controller for the Openstack API """
def index(self, req):
def index(self, req, **kwargs):
""" Returns a list of Shared IP Groups for the user """
raise faults.Fault(exc.HTTPNotImplemented())
def show(self, req, id):
def show(self, req, id, **kwargs):
""" Shows in-depth information on a specific Shared IP Group """
raise faults.Fault(exc.HTTPNotImplemented())
def update(self, req, id, body):
def update(self, req, id, **kwargs):
""" You can't update a Shared IP Group """
raise faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, id):
def delete(self, req, id, **kwargs):
""" Deletes a Shared IP Group """
raise faults.Fault(exc.HTTPNotImplemented())
def detail(self, req):
def detail(self, req, **kwargs):
""" Returns a complete list of Shared IP Groups """
raise faults.Fault(exc.HTTPNotImplemented())
def create(self, req, body):
def create(self, req, **kwargs):
""" Creates a new Shared IP group """
raise faults.Fault(exc.HTTPNotImplemented())
+4 -2
View File
@@ -105,8 +105,10 @@ def create_resource():
},
}
serializers = {
body_serializers = {
'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
}
return wsgi.Resource(Controller(), serializers=serializers)
serializer = wsgi.ResponseSerializer(body_serializers)
return wsgi.Resource(Controller(), serializer=serializer)
+3 -2
View File
@@ -31,11 +31,12 @@ class Versions(wsgi.Resource):
}
}
serializers = {
body_serializers = {
'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
}
serializer = wsgi.ResponseSerializer(body_serializers)
wsgi.Resource.__init__(self, None, serializers=serializers)
wsgi.Resource.__init__(self, None, serializer=serializer)
def dispatch(self, request, *args):
"""Respond to a request for all OpenStack API versions."""
+6 -4
View File
@@ -33,16 +33,18 @@ class ViewBuilderV10(ViewBuilder):
return dict(public=public_ips, private=private_ips)
def build_public_parts(self, inst):
return utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
return utils.get_from_path(inst, 'fixed_ips/floating_ips/address')
def build_private_parts(self, inst):
return utils.get_from_path(inst, 'fixed_ip/address')
return utils.get_from_path(inst, 'fixed_ips/address')
class ViewBuilderV11(ViewBuilder):
def build(self, inst):
private_ips = utils.get_from_path(inst, 'fixed_ip/address')
# TODO(tr3buchet) - this shouldn't be hard coded to 4...
private_ips = utils.get_from_path(inst, 'fixed_ips/address')
private_ips = [dict(version=4, addr=a) for a in private_ips]
public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
public_ips = utils.get_from_path(inst,
'fixed_ips/floating_ips/address')
public_ips = [dict(version=4, addr=a) for a in public_ips]
return dict(public=public_ips, private=private_ips)
+9 -7
View File
@@ -71,6 +71,7 @@ class ViewBuilderV11(ViewBuilder):
def _build_links(self, flavor_obj):
"""Generate a container of links that refer to the provided flavor."""
href = self.generate_href(flavor_obj["id"])
bookmark = self.generate_bookmark(flavor_obj["id"])
links = [
{
@@ -79,13 +80,7 @@ class ViewBuilderV11(ViewBuilder):
},
{
"rel": "bookmark",
"type": "application/json",
"href": href,
},
{
"rel": "bookmark",
"type": "application/xml",
"href": href,
"href": bookmark,
},
]
@@ -94,3 +89,10 @@ class ViewBuilderV11(ViewBuilder):
def generate_href(self, flavor_id):
"""Create an url that refers to a specific flavor id."""
return "%s/flavors/%s" % (self.base_url, flavor_id)
def generate_bookmark(self, flavor_id):
"""Create an url that refers to a specific flavor id."""
return "%s/flavors/%s" % (
common.remove_version_from_href(self.base_url),
flavor_id,
)
+12 -7
View File
@@ -17,6 +17,8 @@
import os.path
from nova.api.openstack import common
class ViewBuilder(object):
"""Base class for generating responses to OpenStack API image requests."""
@@ -104,6 +106,10 @@ class ViewBuilderV11(ViewBuilder):
"""Return a standardized image structure for display by the API."""
image = ViewBuilder.build(self, image_obj, detail)
href = self.generate_href(image_obj["id"])
bookmark = self.generate_bookmark(image_obj["id"])
if detail:
image["metadata"] = image_obj.get("properties", {})
image["links"] = [{
"rel": "self",
@@ -111,13 +117,12 @@ class ViewBuilderV11(ViewBuilder):
},
{
"rel": "bookmark",
"type": "application/json",
"href": href,
},
{
"rel": "bookmark",
"type": "application/xml",
"href": href,
"href": bookmark,
}]
return image
def generate_bookmark(self, image_id):
"""Create an url that refers to a specific flavor id."""
return os.path.join(common.remove_version_from_href(self._url),
"images", str(image_id))
+7 -7
View File
@@ -156,6 +156,7 @@ class ViewBuilderV11(ViewBuilder):
def _build_links(self, response, inst):
href = self.generate_href(inst["id"])
bookmark = self.generate_bookmark(inst["id"])
links = [
{
@@ -164,13 +165,7 @@ class ViewBuilderV11(ViewBuilder):
},
{
"rel": "bookmark",
"type": "application/json",
"href": href,
},
{
"rel": "bookmark",
"type": "application/xml",
"href": href,
"href": bookmark,
},
]
@@ -179,3 +174,8 @@ class ViewBuilderV11(ViewBuilder):
def generate_href(self, server_id):
"""Create an url that refers to a specific server id."""
return os.path.join(self.base_url, "servers", str(server_id))
def generate_bookmark(self, server_id):
"""Create an url that refers to a specific flavor id."""
return os.path.join(common.remove_version_from_href(self.base_url),
"servers", str(server_id))
+130 -71
View File
@@ -46,38 +46,51 @@ class Request(webob.Request):
"""
if not "Content-Type" in self.headers:
raise exception.InvalidContentType(content_type=None)
return None
allowed_types = ("application/xml", "application/json")
content_type = self.content_type
if content_type not in allowed_types:
raise exception.InvalidContentType(content_type=content_type)
else:
return content_type
return content_type
class TextDeserializer(object):
"""Custom request body deserialization based on controller action name."""
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization"""
def deserialize(self, datastring, action='default'):
"""Find local deserialization method and parse request body."""
action_method = getattr(self, str(action), self.default)
return action_method(datastring)
return self.dispatch(datastring, action=action)
def default(self, datastring):
"""Default deserialization code should live here"""
raise NotImplementedError()
return {}
class JSONDeserializer(TextDeserializer):
def default(self, datastring):
def _from_json(self, datastring):
try:
return utils.loads(datastring)
except ValueError:
raise exception.MalformedRequestBody(
reason=_("malformed JSON in request body"))
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
@@ -90,15 +103,15 @@ class XMLDeserializer(TextDeserializer):
super(XMLDeserializer, self).__init__()
self.metadata = metadata or {}
def default(self, datastring):
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
try:
node = minidom.parseString(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
except expat.ExpatError:
raise exception.MalformedRequestBody(
reason=_("malformed XML in request body"))
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
@@ -121,21 +134,32 @@ class XMLDeserializer(TextDeserializer):
listnames)
return result
def default(self, datastring):
return {'body': self._from_xml(datastring)}
class RequestHeadersDeserializer(ActionDispatcher):
"""Default request headers deserializer"""
def deserialize(self, request, action):
return self.dispatch(request, action=action)
def default(self, request):
return {}
class RequestDeserializer(object):
"""Break up a Request object into more useful pieces."""
def __init__(self, deserializers=None):
"""
:param deserializers: dictionary of content-type-specific deserializers
"""
self.deserializers = {
def __init__(self, body_deserializers=None, headers_deserializer=None):
self.body_deserializers = {
'application/xml': XMLDeserializer(),
'application/json': JSONDeserializer(),
}
self.body_deserializers.update(body_deserializers or {})
self.deserializers.update(deserializers or {})
self.headers_deserializer = headers_deserializer or \
RequestHeadersDeserializer()
def deserialize(self, request):
"""Extract necessary pieces of the request.
@@ -149,26 +173,42 @@ class RequestDeserializer(object):
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
if request.method.lower() in ('post', 'put'):
if len(request.body) == 0:
action_args['body'] = None
else:
content_type = request.get_content_type()
deserializer = self.get_deserializer(content_type)
try:
body = deserializer.deserialize(request.body, action)
action_args['body'] = body
except exception.InvalidContentType:
action_args['body'] = None
action_args.update(self.deserialize_headers(request, action))
action_args.update(self.deserialize_body(request, action))
accept = self.get_expected_content_type(request)
return (action, action_args, accept)
def get_deserializer(self, content_type):
def deserialize_headers(self, request, action):
return self.headers_deserializer.deserialize(request, action)
def deserialize_body(self, request, action):
try:
return self.deserializers[content_type]
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug(_("Unrecognized Content-Type provided in request"))
return {}
if content_type is None:
LOG.debug(_("No Content-Type provided in request"))
return {}
if not len(request.body) > 0:
LOG.debug(_("Empty body provided in request"))
return {}
try:
deserializer = self.get_body_deserializer(content_type)
except exception.InvalidContentType:
LOG.debug(_("Unable to deserialize body as provided Content-Type"))
raise
return deserializer.deserialize(request.body, action)
def get_body_deserializer(self, content_type):
try:
return self.body_deserializers[content_type]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
@@ -195,20 +235,18 @@ class RequestDeserializer(object):
return args
class DictSerializer(object):
"""Custom response body serialization based on controller action name."""
class DictSerializer(ActionDispatcher):
"""Default request body serialization"""
def serialize(self, data, action='default'):
"""Find local serialization method and encode response body."""
action_method = getattr(self, str(action), self.default)
return action_method(data)
return self.dispatch(data, action=action)
def default(self, data):
"""Default serialization code should live here"""
raise NotImplementedError()
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization"""
def default(self, data):
return utils.dumps(data)
@@ -295,19 +333,28 @@ class XMLDictSerializer(DictSerializer):
return result
class ResponseHeadersSerializer(ActionDispatcher):
"""Default response headers serialization"""
def serialize(self, response, data, action):
self.dispatch(response, data, action=action)
def default(self, response, data):
response.status_int = 200
class ResponseSerializer(object):
"""Encode the necessary pieces into a response object"""
def __init__(self, serializers=None):
"""
:param serializers: dictionary of content-type-specific serializers
"""
self.serializers = {
def __init__(self, body_serializers=None, headers_serializer=None):
self.body_serializers = {
'application/xml': XMLDictSerializer(),
'application/json': JSONDictSerializer(),
}
self.serializers.update(serializers or {})
self.body_serializers.update(body_serializers or {})
self.headers_serializer = headers_serializer or \
ResponseHeadersSerializer()
def serialize(self, response_data, content_type, action='default'):
"""Serialize a dict into a string and wrap in a wsgi.Request object.
@@ -317,16 +364,21 @@ class ResponseSerializer(object):
"""
response = webob.Response()
response.headers['Content-Type'] = content_type
serializer = self.get_serializer(content_type)
response.body = serializer.serialize(response_data, action)
self.serialize_headers(response, response_data, action)
self.serialize_body(response, response_data, content_type, action)
return response
def get_serializer(self, content_type):
def serialize_headers(self, response, data, action):
self.headers_serializer.serialize(response, data, action)
def serialize_body(self, response, data, content_type, action):
response.headers['Content-Type'] = content_type
serializer = self.get_body_serializer(content_type)
response.body = serializer.serialize(data, action)
def get_body_serializer(self, content_type):
try:
return self.serializers[content_type]
return self.body_serializers[content_type]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
@@ -343,27 +395,28 @@ class Resource(wsgi.Application):
serialized by requested content type.
"""
def __init__(self, controller, serializers=None, deserializers=None):
def __init__(self, controller, deserializer=None, serializer=None):
"""
:param controller: object that implement methods created by routes lib
:param serializers: dict of content-type specific text serializers
:param deserializers: dict of content-type specific text deserializers
:param deserializer: object that can serialize the output of a
controller into a webob response
:param serializer: object that can deserialize a webob request
into necessary pieces
"""
self.controller = controller
self.serializer = ResponseSerializer(serializers)
self.deserializer = RequestDeserializer(deserializers)
self.deserializer = deserializer or RequestDeserializer()
self.serializer = serializer or ResponseSerializer()
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.debug("%(method)s %(url)s" % {"method": request.method,
LOG.info("%(method)s %(url)s" % {"method": request.method,
"url": request.url})
try:
action, action_args, accept = self.deserializer.deserialize(
request)
action, args, accept = self.deserializer.deserialize(request)
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return webob.exc.HTTPBadRequest(explanation=msg)
@@ -371,11 +424,13 @@ class Resource(wsgi.Application):
msg = _("Malformed request body")
return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))
action_result = self.dispatch(request, action, action_args)
action_result = self.dispatch(request, action, args)
#TODO(bcwaldon): find a more elegant way to pass through non-dict types
if type(action_result) is dict:
response = self.serializer.serialize(action_result, accept, action)
if type(action_result) is dict or action_result is None:
response = self.serializer.serialize(action_result,
accept,
action=action)
else:
response = action_result
@@ -386,7 +441,7 @@ class Resource(wsgi.Application):
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s" % msg_dict)
LOG.debug(msg)
LOG.info(msg)
return response
@@ -394,4 +449,8 @@ class Resource(wsgi.Application):
"""Find action-spefic method on controller and call it."""
controller_method = getattr(self.controller, action)
return controller_method(req=request, **action_args)
try:
return controller_method(req=request, **action_args)
except TypeError, exc:
LOG.debug(str(exc))
return webob.exc.HTTPBadRequest()
+5 -4
View File
@@ -196,14 +196,15 @@ def create_resource(version):
},
}
serializers = {
body_serializers = {
'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10,
metadata=metadata),
}
serializer = wsgi.ResponseSerializer(body_serializers)
deserializers = {
body_deserializers = {
'application/xml': helper.ServerXMLDeserializer(),
}
deserializer = wsgi.RequestDeserializer(body_deserializers)
return wsgi.Resource(controller, serializers=serializers,
deserializers=deserializers)
return wsgi.Resource(controller, deserializer, serializer)
+24
View File
@@ -100,6 +100,11 @@ class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103
pass
class SERVER_DOWN(Exception): # pylint: disable=C0103
"""Duplicate exception class from real LDAP module."""
pass
def initialize(_uri):
"""Opens a fake connection with an LDAP server."""
return FakeLDAP()
@@ -202,25 +207,38 @@ def _to_json(unencoded):
return json.dumps(list(unencoded))
server_fail = False
class FakeLDAP(object):
"""Fake LDAP connection."""
def simple_bind_s(self, dn, password):
"""This method is ignored, but provided for compatibility."""
if server_fail:
raise SERVER_DOWN
pass
def unbind_s(self):
"""This method is ignored, but provided for compatibility."""
if server_fail:
raise SERVER_DOWN
pass
def add_s(self, dn, attr):
"""Add an object with the specified attributes at dn."""
if server_fail:
raise SERVER_DOWN
key = "%s%s" % (self.__prefix, dn)
value_dict = dict([(k, _to_json(v)) for k, v in attr])
Store.instance().hmset(key, value_dict)
def delete_s(self, dn):
"""Remove the ldap object at specified dn."""
if server_fail:
raise SERVER_DOWN
Store.instance().delete("%s%s" % (self.__prefix, dn))
def modify_s(self, dn, attrs):
@@ -232,6 +250,9 @@ class FakeLDAP(object):
([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)
"""
if server_fail:
raise SERVER_DOWN
store = Store.instance()
key = "%s%s" % (self.__prefix, dn)
@@ -255,6 +276,9 @@ class FakeLDAP(object):
fields -- fields to return. Returns all fields if not specified
"""
if server_fail:
raise SERVER_DOWN
if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
raise NotImplementedError(str(scope))
store = Store.instance()
+37 -2
View File
@@ -101,6 +101,41 @@ def sanitize(fn):
return _wrapped
class LDAPWrapper(object):
def __init__(self, ldap, url, user, password):
self.ldap = ldap
self.url = url
self.user = user
self.password = password
self.conn = None
def __wrap_reconnect(f):
def inner(self, *args, **kwargs):
if self.conn is None:
self.connect()
return f(self.conn)(*args, **kwargs)
else:
try:
return f(self.conn)(*args, **kwargs)
except self.ldap.SERVER_DOWN:
self.connect()
return f(self.conn)(*args, **kwargs)
return inner
def connect(self):
try:
self.conn = self.ldap.initialize(self.url)
self.conn.simple_bind_s(self.user, self.password)
except self.ldap.SERVER_DOWN:
self.conn = None
raise
search_s = __wrap_reconnect(lambda conn: conn.search_s)
add_s = __wrap_reconnect(lambda conn: conn.add_s)
delete_s = __wrap_reconnect(lambda conn: conn.delete_s)
modify_s = __wrap_reconnect(lambda conn: conn.modify_s)
class LdapDriver(object):
"""Ldap Auth driver
@@ -124,8 +159,8 @@ class LdapDriver(object):
LdapDriver.project_objectclass = 'novaProject'
self.__cache = None
if LdapDriver.conn is None:
LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
LdapDriver.conn = LDAPWrapper(self.ldap, FLAGS.ldap_url,
FLAGS.ldap_user_dn,
FLAGS.ldap_password)
if LdapDriver.mc is None:
LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
+10 -6
View File
@@ -630,13 +630,17 @@ class AuthManager(object):
not been allocated for user.
"""
network_ref = db.project_get_network(context.get_admin_context(),
Project.safe_id(project), False)
if not network_ref:
networks = db.project_get_networks(context.get_admin_context(),
Project.safe_id(project), False)
if not networks:
return (None, None)
return (network_ref['vpn_public_address'],
network_ref['vpn_public_port'])
# TODO(tr3buchet): not sure what you guys plan on doing with this
# but it's possible for a project to have multiple sets of vpn data
# for now I'm just returning the first one
network = networks[0]
return (network['vpn_public_address'],
network['vpn_public_port'])
def delete_project(self, project):
"""Deletes a project"""
+217 -100
View File
@@ -48,9 +48,27 @@ flags.DEFINE_integer('find_host_timeout', 30,
'Timeout after NN seconds when looking for a host.')
def generate_default_hostname(instance_id):
def generate_default_hostname(instance):
"""Default function to generate a hostname given an instance reference."""
return str(instance_id)
display_name = instance['display_name']
if display_name is None:
return 'server_%d' % (instance['id'],)
table = ''
deletions = ''
for i in xrange(256):
c = chr(i)
if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'):
table += c
elif c == ' ':
table += '_'
elif ('A' <= c <= 'Z'):
table += c.lower()
else:
table += '\0'
deletions += c
if isinstance(display_name, unicode):
display_name = display_name.encode('latin-1', 'ignore')
return display_name.translate(table, deletions)
def _is_able_to_shutdown(instance, instance_id):
@@ -83,23 +101,6 @@ class API(base.Base):
self.hostname_factory = hostname_factory
super(API, self).__init__(**kwargs)
def get_network_topic(self, context, instance_id):
"""Get the network topic for an instance."""
try:
instance = self.get(context, instance_id)
except exception.NotFound:
LOG.warning(_("Instance %d was not found in get_network_topic"),
instance_id)
raise
host = instance['host']
if not host:
raise exception.Error(_("Instance %d has no host") % instance_id)
topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
return rpc.call(context,
topic,
{"method": "get_network_topic", "args": {'fake': 1}})
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
@@ -143,7 +144,7 @@ class API(base.Base):
def _check_create_parameters(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=1, max_count=1,
min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
@@ -154,6 +155,10 @@ class API(base.Base):
if not instance_type:
instance_type = instance_types.get_default_instance_type()
if not min_count:
min_count = 1
if not max_count:
max_count = min_count
num_instances = quota.allowed_instances(context, max_count,
instance_type)
@@ -203,18 +208,7 @@ class API(base.Base):
if ramdisk_id:
image_service.show(context, ramdisk_id)
if security_group is None:
security_group = ['default']
if not type(security_group) is list:
security_group = [security_group]
security_groups = []
self.ensure_default_security_group(context)
for security_group_name in security_group:
group = db.security_group_get_by_name(context,
context.project_id,
security_group_name)
security_groups.append(group['id'])
if key_data is None and key_name:
key_pair = db.key_pair_get(context, context.user_id, key_name)
@@ -249,29 +243,42 @@ class API(base.Base):
'architecture': architecture,
'vm_mode': vm_mode}
return (num_instances, base_options, security_groups)
return (num_instances, base_options)
def create_db_entry_for_new_instance(self, context, base_options,
security_groups, block_device_mapping, num=1):
security_group, block_device_mapping, num=1):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security
groups, MAC address, etc). This will called by create()
in the majority of situations, but all-at-once style
Schedulers may initiate the call."""
instance = dict(mac_address=utils.generate_mac(),
launch_index=num,
**base_options)
including any related table updates (such as security group,
etc).
This will called by create() in the majority of situations,
but create_all_at_once() style Schedulers may initiate the call.
If you are changing this method, be sure to update both
call paths.
"""
instance = dict(launch_index=num, **base_options)
instance = self.db.instance_create(context, instance)
instance_id = instance['id']
elevated = context.elevated()
if not security_groups:
security_groups = []
if security_group is None:
security_group = ['default']
if not isinstance(security_group, list):
security_group = [security_group]
security_groups = []
for security_group_name in security_group:
group = db.security_group_get_by_name(context,
context.project_id,
security_group_name)
security_groups.append(group['id'])
for security_group_id in security_groups:
self.db.instance_add_security_group(elevated,
instance_id,
security_group_id)
block_device_mapping = block_device_mapping or []
# NOTE(yamahata)
# tell vm driver to attach volume at boot time by updating
# BlockDeviceMapping
@@ -290,10 +297,12 @@ class API(base.Base):
self.db.block_device_mapping_create(elevated, values)
# Set sane defaults if not specified
updates = dict(hostname=self.hostname_factory(instance_id))
updates = {}
if (not hasattr(instance, 'display_name') or
instance.display_name is None):
updates['display_name'] = "Server %s" % instance_id
instance['display_name'] = updates['display_name']
updates['hostname'] = self.hostname_factory(instance)
instance = self.update(context, instance_id, **updates)
@@ -338,17 +347,16 @@ class API(base.Base):
def create_all_at_once(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=1, max_count=1,
min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
injected_files=None, admin_password=None, zone_blob=None,
reservation_id=None):
reservation_id=None, block_device_mapping=None):
"""Provision the instances by passing the whole request to
the Scheduler for execution. Returns a Reservation ID
related to the creation of all of these instances."""
num_instances, base_options, security_groups = \
self._check_create_parameters(
num_instances, base_options = self._check_create_parameters(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
@@ -368,7 +376,7 @@ class API(base.Base):
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=1, max_count=1,
min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
@@ -380,11 +388,13 @@ class API(base.Base):
Scheduler drivers, but may remove the effectiveness of the
more complicated drivers.
NOTE: If you change this method, be sure to change
create_all_at_once() at the same time!
Returns a list of instance dicts.
"""
num_instances, base_options, security_groups = \
self._check_create_parameters(
num_instances, base_options = self._check_create_parameters(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
@@ -394,12 +404,11 @@ class API(base.Base):
injected_files, admin_password, zone_blob,
reservation_id)
block_device_mapping = block_device_mapping or []
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = self.create_db_entry_for_new_instance(context,
base_options, security_groups,
base_options, security_group,
block_device_mapping, num=num)
instances.append(instance)
instance_id = instance['id']
@@ -613,17 +622,53 @@ class API(base.Base):
"""
return self.get(context, instance_id)
def get_all_across_zones(self, context, reservation_id):
"""Get all instances with this reservation_id, across
all available Zones (if any).
"""
context = context.elevated()
instances = self.db.instance_get_all_by_reservation(
context, reservation_id)
def get_all(self, context, project_id=None, reservation_id=None,
fixed_ip=None, recurse_zones=False):
"""Get all instances filtered by one of the given parameters.
children = scheduler_api.call_zone_method(context, "list",
novaclient_collection_name="servers",
reservation_id=reservation_id)
If there is no filter and the context is an admin, it will retreive
all instances in the system.
"""
if reservation_id is not None:
recurse_zones = True
instances = self.db.instance_get_all_by_reservation(
context, reservation_id)
elif fixed_ip is not None:
try:
instances = self.db.fixed_ip_get_instance(context, fixed_ip)
except exception.FloatingIpNotFound, e:
if not recurse_zones:
raise
instances = None
elif project_id or not context.is_admin:
if not context.project:
instances = self.db.instance_get_all_by_user(
context, context.user_id)
else:
if project_id is None:
project_id = context.project_id
instances = self.db.instance_get_all_by_project(
context, project_id)
else:
instances = self.db.instance_get_all(context)
if instances is None:
instances = []
elif not isinstance(instances, list):
instances = [instances]
if not recurse_zones:
return instances
admin_context = context.elevated()
children = scheduler_api.call_zone_method(admin_context,
"list",
novaclient_collection_name="servers",
reservation_id=reservation_id,
project_id=project_id,
fixed_ip=fixed_ip,
recurse_zones=True)
for zone, servers in children:
for server in servers:
@@ -632,32 +677,6 @@ class API(base.Base):
instances.append(server._info)
return instances
def get_all(self, context, project_id=None, reservation_id=None,
fixed_ip=None):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retreive
all instances in the system.
"""
if reservation_id is not None:
return self.get_all_across_zones(context, reservation_id)
if fixed_ip is not None:
return self.db.fixed_ip_get_instance(context, fixed_ip)
if project_id or not context.is_admin:
if not context.project:
return self.db.instance_get_all_by_user(
context, context.user_id)
if project_id is None:
project_id = context.project_id
return self.db.instance_get_all_by_project(
context, project_id)
return self.db.instance_get_all(context)
def _cast_compute_message(self, method, context, instance_id, host=None,
params=None):
"""Generic handler for RPC casts to compute.
@@ -690,7 +709,7 @@ class API(base.Base):
params = {}
if not host:
instance = self.get(context, instance_id)
host = instance["host"]
host = instance['host']
queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
params['instance_id'] = instance_id
kwargs = {'method': method, 'args': params}
@@ -711,19 +730,60 @@ class API(base.Base):
raise exception.Error(_("Unable to find host for Instance %s")
% instance_id)
def backup(self, context, instance_id, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param name: name of the backup or snapshot
name = backup_type # daily backups are called 'daily'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
recv_meta = self._create_image(context, instance_id, name, 'backup',
backup_type=backup_type, rotation=rotation,
extra_properties=extra_properties)
return recv_meta
def snapshot(self, context, instance_id, name, extra_properties=None):
"""Snapshot the given instance.
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: A dict containing image metadata
"""
properties = {'instance_id': str(instance_id),
return self._create_image(context, instance_id, name, 'snapshot',
extra_properties=extra_properties)
def _create_image(self, context, instance_id, name, image_type,
backup_type=None, rotation=None, extra_properties=None):
"""Create snapshot or backup for an instance on this host.
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
instance = db.api.instance_get(context, instance_id)
properties = {'instance_uuid': instance['uuid'],
'user_id': str(context.user_id),
'image_state': 'creating'}
'image_state': 'creating',
'image_type': image_type,
'backup_type': backup_type}
properties.update(extra_properties or {})
sent_meta = {'name': name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = self.image_service.create(context, sent_meta)
params = {'image_id': recv_meta['id']}
params = {'image_id': recv_meta['id'], 'image_type': image_type,
'backup_type': backup_type, 'rotation': rotation}
self._cast_compute_message('snapshot_instance', context, instance_id,
params=params)
return recv_meta
@@ -795,13 +855,24 @@ class API(base.Base):
self.db.instance_update(context, instance_id,
{'host': migration_ref['dest_compute'], })
def resize(self, context, instance_id, flavor_id):
"""Resize a running instance."""
def resize(self, context, instance_id, flavor_id=None):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
instance = self.db.instance_get(context, instance_id)
current_instance_type = instance['instance_type']
new_instance_type = self.db.instance_type_get_by_flavor_id(
context, flavor_id)
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug(_("flavor_id is None. Assuming migration."))
new_instance_type = current_instance_type
else:
new_instance_type = self.db.instance_type_get_by_flavor_id(
context, flavor_id)
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
@@ -815,7 +886,8 @@ class API(base.Base):
if current_memory_mb > new_memory_mb:
raise exception.ApiError(_("Invalid flavor: cannot downsize"
"instances"))
if current_memory_mb == new_memory_mb:
if (current_memory_mb == new_memory_mb) and flavor_id:
raise exception.ApiError(_("Invalid flavor: cannot use"
"the same flavor. "))
@@ -823,7 +895,24 @@ class API(base.Base):
{"method": "prep_resize",
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id,
"flavor_id": flavor_id}})
"flavor_id": new_instance_type['id']}})
@scheduler_api.reroute_compute("add_fixed_ip")
def add_fixed_ip(self, context, instance_id, network_id):
"""Add fixed_ip from specified network to given instance."""
self._cast_compute_message('add_fixed_ip_to_instance', context,
instance_id,
network_id)
#TODO(tr3buchet): how to run this in the correct zone?
def add_network_to_project(self, context, project_id):
"""Force adds a network to the project."""
# this will raise if zone doesn't know about project so the decorator
# can catch it and pass it down
self.db.project_get(context, project_id)
# didn't raise so this is the correct zone
self.network_api.add_network_to_project(context, project_id)
@scheduler_api.reroute_compute("pause")
def pause(self, context, instance_id):
@@ -835,6 +924,11 @@ class API(base.Base):
"""Unpause the given instance."""
self._cast_compute_message('unpause_instance', context, instance_id)
def set_host_enabled(self, context, host, enabled):
"""Sets the specified host's ability to accept new instances."""
return self._call_compute_message("set_host_enabled", context,
instance_id=None, host=host, params={"enabled": enabled})
@scheduler_api.reroute_compute("diagnostics")
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
@@ -967,11 +1061,34 @@ class API(base.Base):
return instance
def associate_floating_ip(self, context, instance_id, address):
"""Associate a floating ip with an instance."""
"""Makes calls to network_api to associate_floating_ip.
:param address: is a string floating ip address
"""
instance = self.get(context, instance_id)
# TODO(tr3buchet): currently network_info doesn't contain floating IPs
# in its info, if this changes, the next few lines will need to
# accomodate the info containing floating as well as fixed ip addresses
fixed_ip_addrs = []
for info in self.network_api.get_instance_nw_info(context,
instance):
ips = info[1]['ips']
fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in ips])
# TODO(tr3buchet): this will associate the floating IP with the first
# fixed_ip (lowest id) an instance has. This should be changed to
# support specifying a particular fixed_ip if multiple exist.
if not fixed_ip_addrs:
msg = _("instance |%s| has no fixed_ips. "
"unable to associate floating ip") % instance_id
raise exception.ApiError(msg)
if len(fixed_ip_addrs) > 1:
LOG.warning(_("multiple fixed_ips exist, using the first: %s"),
fixed_ip_addrs[0])
self.network_api.associate_floating_ip(context,
floating_ip=address,
fixed_ip=instance['fixed_ip'])
fixed_ip=fixed_ip_addrs[0])
def get_instance_metadata(self, context, instance_id):
"""Get all metadata associated with an instance."""
+172 -100
View File
@@ -46,6 +46,7 @@ from eventlet import greenthread
from nova import exception
from nova import flags
import nova.image
from nova import log as logging
from nova import manager
from nova import network
@@ -53,6 +54,7 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
from nova.notifier import api as notifier_api
from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -129,9 +131,9 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.error(_("Unable to load the virtualization driver: %s") % (e))
sys.exit(1)
self.network_api = network.API()
self.network_manager = utils.import_object(FLAGS.network_manager)
self.volume_manager = utils.import_object(FLAGS.volume_manager)
self.network_api = network.API()
self._last_host_check = 0
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
@@ -178,20 +180,6 @@ class ComputeManager(manager.SchedulerDependentManager):
FLAGS.console_topic,
FLAGS.console_host)
def get_network_topic(self, context, **kwargs):
"""Retrieves the network host for a project on this host."""
# TODO(vish): This method should be memoized. This will make
# the call to get_network_host cheaper, so that
# it can pas messages instead of checking the db
# locally.
if FLAGS.stub_network:
host = FLAGS.network_host
else:
host = self.network_manager.get_network_host(context)
return self.db.queue_get_for(context,
FLAGS.network_topic,
host)
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@@ -279,10 +267,10 @@ class ComputeManager(manager.SchedulerDependentManager):
def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance_ref.injected_files = kwargs.get('injected_files', [])
instance_ref.admin_pass = kwargs.get('admin_password', None)
if instance_ref['name'] in self.driver.list_instances():
instance = self.db.instance_get(context, instance_id)
instance.injected_files = kwargs.get('injected_files', [])
instance.admin_pass = kwargs.get('admin_password', None)
if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
@@ -295,54 +283,45 @@ class ComputeManager(manager.SchedulerDependentManager):
power_state.NOSTATE,
'networking')
is_vpn = instance_ref['image_ref'] == str(FLAGS.vpn_image_id)
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
# NOTE(vish): This could be a cast because we don't do anything
# with the address currently, but I'm leaving it as
# a call to ensure that network setup completes. We
# will eventually also need to save the address here.
if not FLAGS.stub_network:
address = rpc.call(context,
self.get_network_topic(context),
{"method": "allocate_fixed_ip",
"args": {"instance_id": instance_id,
"vpn": is_vpn}})
network_info = self.network_api.allocate_for_instance(context,
instance, vpn=is_vpn)
LOG.debug(_("instance network_info: |%s|"), network_info)
self.network_manager.setup_compute_network(context,
instance_id)
else:
# TODO(tr3buchet) not really sure how this should be handled.
# virt requires network_info to be passed in but stub_network
# is enabled. Setting to [] for now will cause virt to skip
# all vif creation and network injection, maybe this is correct
network_info = []
block_device_mapping = self._setup_block_device_mapping(
context,
instance_id)
bd_mapping = self._setup_block_device_mapping(context, instance_id)
# TODO(vish) check to make sure the availability zone matches
self._update_state(context, instance_id, power_state.BUILDING)
try:
self.driver.spawn(instance_ref,
block_device_mapping=block_device_mapping)
self.driver.spawn(instance, network_info, bd_mapping)
except Exception as ex: # pylint: disable=W0702
msg = _("Instance '%(instance_id)s' failed to spawn. Is "
"virtualization enabled in the BIOS? Details: "
"%(ex)s") % locals()
LOG.exception(msg)
if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip:
public_ip = self.network_api.allocate_floating_ip(context)
self.db.floating_ip_set_auto_assigned(context, public_ip)
fixed_ip = self.db.fixed_ip_get_by_address(context, address)
floating_ip = self.db.floating_ip_get_by_address(context,
public_ip)
self.network_api.associate_floating_ip(
context,
floating_ip,
fixed_ip,
affect_auto_assigned=True)
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
usage_info = utils.usage_from_instance(instance)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.create',
notifier_api.INFO,
usage_info)
except exception.InstanceNotFound:
# FIXME(wwolf): We are just ignoring InstanceNotFound
# exceptions here in case the instance was immediately
@@ -365,53 +344,24 @@ class ComputeManager(manager.SchedulerDependentManager):
def _shutdown_instance(self, context, instance_id, action_str):
"""Shutdown an instance on this host."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance = self.db.instance_get(context, instance_id)
LOG.audit(_("%(action_str)s instance %(instance_id)s") %
{'action_str': action_str, 'instance_id': instance_id},
context=context)
fixed_ip = instance_ref.get('fixed_ip')
if not FLAGS.stub_network and fixed_ip:
floating_ips = fixed_ip.get('floating_ips') or []
for floating_ip in floating_ips:
address = floating_ip['address']
LOG.debug("Disassociating address %s", address,
context=context)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later.
self.network_api.disassociate_floating_ip(context,
address,
True)
if (FLAGS.auto_assign_floating_ip
and floating_ip.get('auto_assigned')):
LOG.debug(_("Deallocating floating ip %s"),
floating_ip['address'],
context=context)
self.network_api.release_floating_ip(context,
address,
True)
if not FLAGS.stub_network:
self.network_api.deallocate_for_instance(context, instance)
address = fixed_ip['address']
if address:
LOG.debug(_("Deallocating address %s"), address,
context=context)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)
volumes = instance_ref.get('volumes') or []
volumes = instance.get('volumes') or []
for volume in volumes:
self._detach_volume(context, instance_id, volume['id'], False)
if (instance_ref['state'] == power_state.SHUTOFF and
instance_ref['state_description'] != 'stopped'):
if (instance['state'] == power_state.SHUTOFF and
instance['state_description'] != 'stopped'):
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
self.driver.destroy(instance_ref)
self.driver.destroy(instance)
if action_str == 'Terminating':
terminate_volumes(self.db, context, instance_id)
@@ -421,9 +371,15 @@ class ComputeManager(manager.SchedulerDependentManager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
instance = self.db.instance_get(context.elevated(), instance_id)
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
usage_info = utils.usage_from_instance(instance)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.delete',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -460,6 +416,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_image_ref(context, instance_id, image_ref)
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
usage_info = utils.usage_from_instance(instance_ref,
image_ref=image_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.rebuild',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -487,8 +449,19 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_state(context, instance_id)
@exception.wrap_exception
def snapshot_instance(self, context, instance_id, image_id):
"""Snapshot an instance on this host."""
def snapshot_instance(self, context, instance_id, image_id,
image_type='snapshot', backup_type=None,
rotation=None):
"""Snapshot an instance on this host.
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param image_id: glance.db.sqlalchemy.models.Image.Id
:param image_type: snapshot | backup
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@@ -508,6 +481,65 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.snapshot(instance_ref, image_id)
if image_type == 'snapshot':
if rotation:
raise exception.ImageRotationNotAllowed()
elif image_type == 'backup':
if rotation:
instance_uuid = instance_ref['uuid']
self.rotate_backups(context, instance_uuid, backup_type,
rotation)
else:
raise exception.RotationRequiredForBackup()
else:
raise Exception(_('Image type not recognized %s') % image_type)
def rotate_backups(self, context, instance_uuid, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance_uuid: string representing uuid of instance
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
# NOTE(jk0): Eventually extract this out to the ImageService?
def fetch_images():
images = []
marker = None
while True:
batch = image_service.detail(context, filters=filters,
marker=marker, sort_key='created_at', sort_dir='desc')
if not batch:
break
images += batch
marker = batch[-1]['id']
return images
image_service = nova.image.get_default_image_service()
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance_uuid}
images = fetch_images()
num_images = len(images)
LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)"
% locals()))
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug(_("Rotating out %d backups" % excess))
for i in xrange(excess):
image = images.pop()
image_id = image['id']
LOG.debug(_("Deleting image %d" % image_id))
image_service.delete(context, image_id)
@exception.wrap_exception
@checks_instance_lock
def set_admin_password(self, context, instance_id, new_pass=None):
@@ -637,6 +669,11 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
self.driver.destroy(instance_ref)
usage_info = utils.usage_from_instance(instance_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.resize.confirm',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -684,6 +721,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.revert_resize(instance_ref)
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
usage_info = utils.usage_from_instance(instance_ref)
notifier_api.notify('compute.%s' % self.host,
'compute.instance.resize.revert',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -720,6 +762,13 @@ class ComputeManager(manager.SchedulerDependentManager):
'migration_id': migration_ref['id'],
'instance_id': instance_id, },
})
usage_info = utils.usage_from_instance(instance_ref,
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
notifier_api.notify('compute.%s' % self.host,
'compute.instance.resize.prep',
notifier_api.INFO,
usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -771,12 +820,26 @@ class ComputeManager(manager.SchedulerDependentManager):
# reload the updated instance ref
# FIXME(mdietz): is there reload functionality?
instance_ref = self.db.instance_get(context, instance_id)
self.driver.finish_resize(instance_ref, disk_info)
instance = self.db.instance_get(context, instance_id)
network_info = self.network_api.get_instance_nw_info(context,
instance)
self.driver.finish_resize(instance, disk_info, network_info)
self.db.migration_update(context, migration_id,
{'status': 'finished', })
@exception.wrap_exception
@checks_instance_lock
def add_fixed_ip_to_instance(self, context, instance_id, network_id):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
self.network_api.add_fixed_ip_to_instance(context, instance_id,
network_id)
self.inject_network_info(context, instance_id)
self.reset_network(context, instance_id)
@exception.wrap_exception
@checks_instance_lock
def pause_instance(self, context, instance_id):
@@ -811,6 +874,12 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
result))
@exception.wrap_exception
def set_host_enabled(self, context, instance_id=None, host=None,
enabled=None):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(host, enabled)
@exception.wrap_exception
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
@@ -880,20 +949,22 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def reset_network(self, context, instance_id):
"""Reset networking on the given instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: reset network'), instance_id,
context=context)
self.driver.reset_network(instance_ref)
self.driver.reset_network(instance)
@checks_instance_lock
def inject_network_info(self, context, instance_id):
"""Inject network info for the given instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: inject network info'), instance_id,
context=context)
self.driver.inject_network_info(instance_ref)
instance = self.db.instance_get(context, instance_id)
network_info = self.network_api.get_instance_nw_info(context,
instance)
LOG.debug(_("network_info to inject: |%s|"), network_info)
self.driver.inject_network_info(instance, network_info)
@exception.wrap_exception
def get_console_output(self, context, instance_id):
@@ -1087,16 +1158,16 @@ class ComputeManager(manager.SchedulerDependentManager):
# Getting instance info
instance_ref = self.db.instance_get(context, instance_id)
ec2_id = instance_ref['hostname']
hostname = instance_ref['hostname']
# Getting fixed ips
fixed_ip = self.db.instance_get_fixed_address(context, instance_id)
if not fixed_ip:
raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id)
fixed_ips = self.db.instance_get_fixed_addresses(context, instance_id)
if not fixed_ips:
raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
# If any volume is mounted, prepare here.
if not instance_ref['volumes']:
LOG.info(_("%s has no volume."), ec2_id)
LOG.info(_("%s has no volume."), hostname)
else:
for v in instance_ref['volumes']:
self.volume_manager.setup_compute_volume(context, v['id'])
@@ -1119,7 +1190,7 @@ class ComputeManager(manager.SchedulerDependentManager):
raise
else:
LOG.warn(_("setup_compute_network() failed %(cnt)d."
"Retry up to %(max_retry)d for %(ec2_id)s.")
"Retry up to %(max_retry)d for %(hostname)s.")
% locals())
time.sleep(1)
@@ -1216,9 +1287,10 @@ class ComputeManager(manager.SchedulerDependentManager):
{'host': dest})
except exception.NotFound:
LOG.info(_('No floating_ip is found for %s.'), i_name)
except:
LOG.error(_("Live migration: Unexpected error:"
"%s cannot inherit floating ip..") % i_name)
except Exception, e:
LOG.error(_("Live migration: Unexpected error: "
"%(i_name)s cannot inherit floating "
"ip.\n%(e)s") % (locals()))
# Restore instance/volume state
self.recover_live_migration(ctxt, instance_ref, dest)
+134 -41
View File
@@ -55,11 +55,6 @@ IMPL = utils.LazyPluggable(FLAGS['db_backend'],
sqlalchemy='nova.db.sqlalchemy.api')
class NoMoreAddresses(exception.Error):
"""No more available addresses."""
pass
class NoMoreBlades(exception.Error):
"""No more available blades."""
pass
@@ -223,14 +218,17 @@ def certificate_update(context, certificate_id, values):
###################
def floating_ip_get(context, id):
return IMPL.floating_ip_get(context, id)
def floating_ip_allocate_address(context, host, project_id):
def floating_ip_allocate_address(context, project_id):
"""Allocate free floating ip and return the address.
Raises if one is not available.
"""
return IMPL.floating_ip_allocate_address(context, host, project_id)
return IMPL.floating_ip_allocate_address(context, project_id)
def floating_ip_create(context, values):
@@ -321,6 +319,7 @@ def migration_get_by_instance_and_status(context, instance_id, status):
return IMPL.migration_get_by_instance_and_status(context, instance_id,
status)
####################
@@ -372,9 +371,14 @@ def fixed_ip_get_by_address(context, address):
return IMPL.fixed_ip_get_by_address(context, address)
def fixed_ip_get_all_by_instance(context, instance_id):
def fixed_ip_get_by_instance(context, instance_id):
"""Get fixed ips by instance or raise if none exist."""
return IMPL.fixed_ip_get_all_by_instance(context, instance_id)
return IMPL.fixed_ip_get_by_instance(context, instance_id)
def fixed_ip_get_by_virtual_interface(context, vif_id):
"""Get fixed ips by virtual interface or raise if none exist."""
return IMPL.fixed_ip_get_by_virtual_interface(context, vif_id)
def fixed_ip_get_instance(context, address):
@@ -399,6 +403,62 @@ def fixed_ip_update(context, address, values):
####################
def virtual_interface_create(context, values):
"""Create a virtual interface record in the database."""
return IMPL.virtual_interface_create(context, values)
def virtual_interface_update(context, vif_id, values):
"""Update a virtual interface record in the database."""
return IMPL.virtual_interface_update(context, vif_id, values)
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table,"""
return IMPL.virtual_interface_get(context, vif_id)
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table filtering on address."""
return IMPL.virtual_interface_get_by_address(context, address)
def virtual_interface_get_by_fixed_ip(context, fixed_ip_id):
"""Gets the virtual interface fixed_ip is associated with."""
return IMPL.virtual_interface_get_by_fixed_ip(context, fixed_ip_id)
def virtual_interface_get_by_instance(context, instance_id):
"""Gets all virtual_interfaces for instance."""
return IMPL.virtual_interface_get_by_instance(context, instance_id)
def virtual_interface_get_by_instance_and_network(context, instance_id,
network_id):
"""Gets all virtual interfaces for instance."""
return IMPL.virtual_interface_get_by_instance_and_network(context,
instance_id,
network_id)
def virtual_interface_get_by_network(context, network_id):
"""Gets all virtual interfaces on network."""
return IMPL.virtual_interface_get_by_network(context, network_id)
def virtual_interface_delete(context, vif_id):
"""Delete virtual interface record from the database."""
return IMPL.virtual_interface_delete(context, vif_id)
def virtual_interface_delete_by_instance(context, instance_id):
"""Delete virtual interface records associated with instance."""
return IMPL.virtual_interface_delete_by_instance(context, instance_id)
####################
def instance_create(context, values):
"""Create an instance from the values dictionary."""
return IMPL.instance_create(context, values)
@@ -434,6 +494,11 @@ def instance_get_all(context):
return IMPL.instance_get_all(context)
def instance_get_active_by_window(context, begin, end=None):
"""Get instances active during a certain time window."""
return IMPL.instance_get_active_by_window(context, begin, end)
def instance_get_all_by_user(context, user_id):
"""Get all instances."""
return IMPL.instance_get_all_by_user(context, user_id)
@@ -454,13 +519,13 @@ def instance_get_all_by_reservation(context, reservation_id):
return IMPL.instance_get_all_by_reservation(context, reservation_id)
def instance_get_fixed_address(context, instance_id):
def instance_get_fixed_addresses(context, instance_id):
"""Get the fixed ip address of an instance."""
return IMPL.instance_get_fixed_address(context, instance_id)
return IMPL.instance_get_fixed_addresses(context, instance_id)
def instance_get_fixed_address_v6(context, instance_id):
return IMPL.instance_get_fixed_address_v6(context, instance_id)
def instance_get_fixed_addresses_v6(context, instance_id):
return IMPL.instance_get_fixed_addresses_v6(context, instance_id)
def instance_get_floating_address(context, instance_id):
@@ -555,9 +620,9 @@ def key_pair_get_all_by_user(context, user_id):
####################
def network_associate(context, project_id):
def network_associate(context, project_id, force=False):
"""Associate a free network to a project."""
return IMPL.network_associate(context, project_id)
return IMPL.network_associate(context, project_id, force)
def network_count(context):
@@ -650,6 +715,11 @@ def network_get_all_by_instance(context, instance_id):
return IMPL.network_get_all_by_instance(context, instance_id)
def network_get_all_by_host(context, host):
"""All networks for which the given host is the network host."""
return IMPL.network_get_all_by_host(context, host)
def network_get_index(context, network_id):
"""Get non-conflicting index for network."""
return IMPL.network_get_index(context, network_id)
@@ -682,23 +752,6 @@ def network_update(context, network_id, values):
###################
def project_get_network(context, project_id, associate=True):
"""Return the network associated with the project.
If associate is true, it will attempt to associate a new
network if one is not found, otherwise it returns None.
"""
return IMPL.project_get_network(context, project_id, associate)
def project_get_network_v6(context, project_id):
return IMPL.project_get_network_v6(context, project_id)
###################
def queue_get_for(context, topic, physical_node_id):
"""Return a channel to send a message to a node with a topic."""
return IMPL.queue_get_for(context, topic, physical_node_id)
@@ -1044,6 +1097,16 @@ def provider_fw_rule_get_all(context):
return IMPL.provider_fw_rule_get_all(context)
def provider_fw_rule_get_all_by_cidr(context, cidr):
"""Get all provider-level firewall rules."""
return IMPL.provider_fw_rule_get_all_by_cidr(context, cidr)
def provider_fw_rule_destroy(context, rule_id):
"""Delete a provider firewall rule from the database."""
return IMPL.provider_fw_rule_destroy(context, rule_id)
###################
@@ -1112,6 +1175,9 @@ def user_update(context, user_id, values):
return IMPL.user_update(context, user_id, values)
###################
def project_get(context, id):
"""Get project by id."""
return IMPL.project_get(context, id)
@@ -1152,17 +1218,23 @@ def project_delete(context, project_id):
return IMPL.project_delete(context, project_id)
def project_get_networks(context, project_id, associate=True):
"""Return the network associated with the project.
If associate is true, it will attempt to associate a new
network if one is not found, otherwise it returns None.
"""
return IMPL.project_get_networks(context, project_id, associate)
def project_get_networks_v6(context, project_id):
return IMPL.project_get_networks_v6(context, project_id)
###################
def host_get_networks(context, host):
"""All networks for which the given host is the network host."""
return IMPL.host_get_networks(context, host)
##################
def console_pool_create(context, values):
"""Create console pool."""
return IMPL.console_pool_create(context, values)
@@ -1266,7 +1338,7 @@ def zone_create(context, values):
def zone_update(context, zone_id, values):
"""Update a child Zone entry."""
return IMPL.zone_update(context, values)
return IMPL.zone_update(context, zone_id, values)
def zone_delete(context, zone_id):
@@ -1329,3 +1401,24 @@ def agent_build_destroy(context, agent_update_id):
def agent_build_update(context, agent_build_id, values):
"""Update agent build entry."""
IMPL.agent_build_update(context, agent_build_id, values)
####################
def instance_type_extra_specs_get(context, instance_type_id):
"""Get all extra specs for an instance type."""
return IMPL.instance_type_extra_specs_get(context, instance_type_id)
def instance_type_extra_specs_delete(context, instance_type_id, key):
"""Delete the given extra specs item."""
IMPL.instance_type_extra_specs_delete(context, instance_type_id, key)
def instance_type_extra_specs_update_or_create(context, instance_type_id,
extra_specs):
"""Create or update instance type extra specs. This adds or modifies the
key/value pairs specified in the extra specs dict argument"""
IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id,
extra_specs)
+621 -205
View File
File diff suppressed because it is too large Load Diff
@@ -58,8 +58,7 @@ provider_fw_rules = Table('provider_fw_rules', meta,
Column('to_port', Integer()),
Column('cidr',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
)
unicode_error=None, _warn_on_bytestring=False)))
def upgrade(migrate_engine):
@@ -0,0 +1,67 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
from sqlalchemy import MetaData, String, Table
from nova import log as logging
meta = MetaData()
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instance_types = Table('instance_types', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
instance_type_extra_specs_table = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_type_id',
Integer(),
ForeignKey('instance_types.id'),
nullable=False),
Column('key',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('value',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
for table in (instance_type_extra_specs_table, ):
try:
table.create()
except Exception:
logging.info(repr(table))
logging.exception('Exception while creating table')
raise
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
for table in (instance_type_extra_specs_table, ):
table.drop()
@@ -0,0 +1,38 @@
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, Float, Integer, MetaData, Table
meta = MetaData()
zones = Table('zones', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
weight_offset = Column('weight_offset', Float(), default=0.0)
weight_scale = Column('weight_scale', Float(), default=1.0)
def upgrade(migrate_engine):
meta.bind = migrate_engine
zones.create_column(weight_offset)
zones.create_column(weight_scale)
def downgrade(migrate_engine):
meta.bind = migrate_engine
zones.drop_column(weight_offset)
zones.drop_column(weight_scale)
@@ -0,0 +1,125 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from sqlalchemy import *
from migrate import *
from nova import log as logging
from nova import utils
meta = MetaData()
# virtual interface table to add to DB
virtual_interfaces = Table('virtual_interfaces', meta,
Column('created_at', DateTime(timezone=False),
default=utils.utcnow()),
Column('updated_at', DateTime(timezone=False),
onupdate=utils.utcnow()),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True),
Column('network_id',
Integer(),
ForeignKey('networks.id')),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=False),
mysql_engine='InnoDB')
# bridge_interface column to add to networks table
interface = Column('bridge_interface',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False))
# virtual interface id column to add to fixed_ips table
# foreignkey added in next migration
virtual_interface_id = Column('virtual_interface_id',
Integer())
def upgrade(migrate_engine):
meta.bind = migrate_engine
# grab tables and (column for dropping later)
instances = Table('instances', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
fixed_ips = Table('fixed_ips', meta, autoload=True)
c = instances.columns['mac_address']
# add interface column to networks table
# values will have to be set manually before running nova
try:
networks.create_column(interface)
except Exception:
logging.error(_("interface column not added to networks table"))
raise
# create virtual_interfaces table
try:
virtual_interfaces.create()
except Exception:
logging.error(_("Table |%s| not created!"), repr(virtual_interfaces))
raise
# add virtual_interface_id column to fixed_ips table
try:
fixed_ips.create_column(virtual_interface_id)
except Exception:
logging.error(_("VIF column not added to fixed_ips table"))
raise
# populate the virtual_interfaces table
# extract data from existing instance and fixed_ip tables
s = select([instances.c.id, instances.c.mac_address,
fixed_ips.c.network_id],
fixed_ips.c.instance_id == instances.c.id)
keys = ('instance_id', 'address', 'network_id')
join_list = [dict(zip(keys, row)) for row in s.execute()]
logging.debug(_("join list for moving mac_addresses |%s|"), join_list)
# insert data into the table
if join_list:
i = virtual_interfaces.insert()
i.execute(join_list)
# populate the fixed_ips virtual_interface_id column
s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
fixed_ips.c.instance_id != None)
for row in s.execute():
m = select([virtual_interfaces.c.id]).\
where(virtual_interfaces.c.instance_id == row['instance_id']).\
as_scalar()
u = fixed_ips.update().values(virtual_interface_id=m).\
where(fixed_ips.c.id == row['id'])
u.execute()
# drop the mac_address column from instances
c.drop()
def downgrade(migrate_engine):
logging.error(_("Can't downgrade without losing data"))
raise Exception
@@ -0,0 +1,56 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from sqlalchemy import *
from migrate import *
from nova import log as logging
from nova import utils
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
# grab tables
fixed_ips = Table('fixed_ips', meta, autoload=True)
virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
# add foreignkey if not sqlite
try:
if not dialect.startswith('sqlite'):
ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
refcolumns=[virtual_interfaces.c.id]).create()
except Exception:
logging.error(_("foreign key constraint couldn't be added"))
raise
def downgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
# drop foreignkey if not sqlite
try:
if not dialect.startswith('sqlite'):
ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
refcolumns=[virtual_interfaces.c.id]).drop()
except Exception:
logging.error(_("foreign key constraint couldn't be dropped"))
raise
@@ -0,0 +1,48 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE fixed_ips_backup (
id INTEGER NOT NULL,
address VARCHAR(255),
virtual_interface_id INTEGER,
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN default FALSE,
leased BOOLEAN default FALSE,
reserved BOOLEAN default FALSE,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
);
INSERT INTO fixed_ips_backup
SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
FROM fixed_ips;
DROP TABLE fixed_ips;
CREATE TABLE fixed_ips (
id INTEGER NOT NULL,
address VARCHAR(255),
virtual_interface_id INTEGER,
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN default FALSE,
leased BOOLEAN default FALSE,
reserved BOOLEAN default FALSE,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id)
);
INSERT INTO fixed_ips
SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
FROM fixed_ips;
DROP TABLE fixed_ips_backup;
COMMIT;
@@ -0,0 +1,48 @@
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE fixed_ips_backup (
id INTEGER NOT NULL,
address VARCHAR(255),
virtual_interface_id INTEGER,
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN default FALSE,
leased BOOLEAN default FALSE,
reserved BOOLEAN default FALSE,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id)
);
INSERT INTO fixed_ips_backup
SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
FROM fixed_ips;
DROP TABLE fixed_ips;
CREATE TABLE fixed_ips (
id INTEGER NOT NULL,
address VARCHAR(255),
virtual_interface_id INTEGER,
network_id INTEGER,
instance_id INTEGER,
allocated BOOLEAN default FALSE,
leased BOOLEAN default FALSE,
reserved BOOLEAN default FALSE,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
);
INSERT INTO fixed_ips
SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
FROM fixed_ips;
DROP TABLE fixed_ips_backup;
COMMIT;
+79 -44
View File
@@ -21,7 +21,7 @@ SQLAlchemy models for nova data.
from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import ForeignKeyConstraint
@@ -209,12 +209,12 @@ class Instance(BASE, NovaBase):
hostname = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
# aka flavor_id
instance_type_id = Column(Integer)
user_data = Column(Text)
reservation_id = Column(String(255))
mac_address = Column(String(255))
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
@@ -548,6 +548,7 @@ class Network(BASE, NovaBase):
netmask_v6 = Column(String(255))
netmask = Column(String(255))
bridge = Column(String(255))
bridge_interface = Column(String(255))
gateway = Column(String(255))
broadcast = Column(String(255))
dns = Column(String(255))
@@ -558,13 +559,67 @@ class Network(BASE, NovaBase):
vpn_private_address = Column(String(255))
dhcp_start = Column(String(255))
# NOTE(vish): The unique constraint below helps avoid a race condition
# when associating a network, but it also means that we
# can't associate two networks with one project.
project_id = Column(String(255), unique=True)
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
class VirtualInterface(BASE, NovaBase):
"""Represents a virtual interface on an instance."""
__tablename__ = 'virtual_interfaces'
id = Column(Integer, primary_key=True)
address = Column(String(255), unique=True)
network_id = Column(Integer, ForeignKey('networks.id'))
network = relationship(Network, backref=backref('virtual_interfaces'))
# TODO(tr3buchet): cut the cord, removed foreign key and backrefs
instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
instance = relationship(Instance, backref=backref('virtual_interfaces'))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
"""Represents a fixed ip for an instance."""
__tablename__ = 'fixed_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
network_id = Column(Integer, ForeignKey('networks.id'), nullable=True)
network = relationship(Network, backref=backref('fixed_ips'))
virtual_interface_id = Column(Integer, ForeignKey('virtual_interfaces.id'),
nullable=True)
virtual_interface = relationship(VirtualInterface,
backref=backref('fixed_ips'))
instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
instance = relationship(Instance,
backref=backref('fixed_ips'),
foreign_keys=instance_id,
primaryjoin='and_('
'FixedIp.instance_id == Instance.id,'
'FixedIp.deleted == False)')
# associated means that a fixed_ip has its instance_id column set
# allocated means that a fixed_ip has a its virtual_interface_id column set
allocated = Column(Boolean, default=False)
# leased means dhcp bridge has leased the ip
leased = Column(Boolean, default=False)
reserved = Column(Boolean, default=False)
class FloatingIp(BASE, NovaBase):
"""Represents a floating ip that dynamically forwards to a fixed ip."""
__tablename__ = 'floating_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True)
fixed_ip = relationship(FixedIp,
backref=backref('floating_ips'),
foreign_keys=fixed_ip_id,
primaryjoin='and_('
'FloatingIp.fixed_ip_id == FixedIp.id,'
'FloatingIp.deleted == False)')
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
auto_assigned = Column(Boolean, default=False, nullable=False)
class AuthToken(BASE, NovaBase):
"""Represents an authorization token for all API transactions.
@@ -580,26 +635,6 @@ class AuthToken(BASE, NovaBase):
cdn_management_url = Column(String(255))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
"""Represents a fixed ip for an instance."""
__tablename__ = 'fixed_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
network_id = Column(Integer, ForeignKey('networks.id'), nullable=True)
network = relationship(Network, backref=backref('fixed_ips'))
instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
instance = relationship(Instance,
backref=backref('fixed_ip', uselist=False),
foreign_keys=instance_id,
primaryjoin='and_('
'FixedIp.instance_id == Instance.id,'
'FixedIp.deleted == False)')
allocated = Column(Boolean, default=False)
leased = Column(Boolean, default=False)
reserved = Column(Boolean, default=False)
class User(BASE, NovaBase):
"""Represents a user."""
__tablename__ = 'users'
@@ -660,23 +695,6 @@ class UserProjectAssociation(BASE, NovaBase):
project_id = Column(String(255), ForeignKey(Project.id), primary_key=True)
class FloatingIp(BASE, NovaBase):
"""Represents a floating ip that dynamically forwards to a fixed ip."""
__tablename__ = 'floating_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True)
fixed_ip = relationship(FixedIp,
backref=backref('floating_ips'),
foreign_keys=fixed_ip_id,
primaryjoin='and_('
'FloatingIp.fixed_ip_id == FixedIp.id,'
'FloatingIp.deleted == False)')
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
auto_assigned = Column(Boolean, default=False, nullable=False)
class ConsolePool(BASE, NovaBase):
"""Represents pool of consoles on the same physical node."""
__tablename__ = 'console_pools'
@@ -716,6 +734,21 @@ class InstanceMetadata(BASE, NovaBase):
'InstanceMetadata.deleted == False)')
class InstanceTypeExtraSpecs(BASE, NovaBase):
"""Represents additional specs as key/value pairs for an instance_type"""
__tablename__ = 'instance_type_extra_specs'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
instance_type = relationship(InstanceTypes, backref="extra_specs",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
'InstanceTypeExtraSpecs.deleted == False)')
class Zone(BASE, NovaBase):
"""Represents a child zone of this zone."""
__tablename__ = 'zones'
@@ -723,6 +756,8 @@ class Zone(BASE, NovaBase):
api_url = Column(String(255))
username = Column(String(255))
password = Column(String(255))
weight_offset = Column(Float(), default=0.0)
weight_scale = Column(Float(), default=1.0)
class AgentBuild(BASE, NovaBase):
@@ -750,7 +785,7 @@ def register_models():
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
AgentBuild, InstanceMetadata, Migration)
AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
+73 -15
View File
@@ -118,6 +118,15 @@ class NovaException(Exception):
return self._error_string
class VirtualInterfaceCreateException(NovaException):
message = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
message = _("5 attempts to create virtual interface"
"with unique mac address failed")
class NotAuthorized(NovaException):
message = _("Not authorized.")
@@ -356,28 +365,56 @@ class DatastoreNotFound(NotFound):
message = _("Could not find the datastore reference(s) which the VM uses.")
class NoFixedIpsFoundForInstance(NotFound):
class FixedIpNotFound(NotFound):
message = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
message = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
message = _("Instance %(instance_id)s has zero fixed ips.")
class FixedIpNotFoundForVirtualInterface(FixedIpNotFound):
message = _("Virtual interface %(vif_id)s has zero associated fixed ips.")
class FixedIpNotFoundForHost(FixedIpNotFound):
message = _("Host %(host)s has zero fixed ips.")
class NoMoreFixedIps(Error):
message = _("Zero fixed ips available.")
class NoFixedIpsDefined(NotFound):
message = _("Zero fixed ips could be found.")
class FloatingIpNotFound(NotFound):
message = _("Floating ip not found for fixed address %(fixed_ip)s.")
message = _("Floating ip not found for id %(id)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
message = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForProject(FloatingIpNotFound):
message = _("Floating ip not found for project %(project_id)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
message = _("Floating ip not found for host %(host)s.")
class NoMoreFloatingIps(FloatingIpNotFound):
message = _("Zero floating ips available.")
class NoFloatingIpsDefined(NotFound):
message = _("Zero floating ips could be found.")
class NoFloatingIpsDefinedForHost(NoFloatingIpsDefined):
message = _("Zero floating ips defined for host %(host)s.")
class NoFloatingIpsDefinedForInstance(NoFloatingIpsDefined):
message = _("Zero floating ips defined for instance %(instance_id)s.")
class NoMoreFloatingIps(NotFound):
message = _("Zero floating ips available.")
message = _("Zero floating ips exist.")
class KeypairNotFound(NotFound):
@@ -504,6 +541,11 @@ class InstanceMetadataNotFound(NotFound):
"key %(metadata_key)s.")
class InstanceTypeExtraSpecsNotFound(NotFound):
message = _("Instance Type %(instance_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class LDAPObjectNotFound(NotFound):
message = _("LDAP object could not be found")
@@ -549,6 +591,14 @@ class GlobalRoleNotAllowed(NotAllowed):
message = _("Unable to use global role %(role_id)s")
class ImageRotationNotAllowed(NovaException):
message = _("Rotation is not allowed for snapshots")
class RotationRequiredForBackup(NovaException):
message = _("Rotation param is required for backup image_type")
#TODO(bcwaldon): EOL this exception!
class Duplicate(NovaException):
pass
@@ -589,3 +639,11 @@ class MigrationError(NovaException):
class MalformedRequestBody(NovaException):
message = _("Malformed message body: %(reason)s")
class PasteConfigNotFound(NotFound):
message = _("Could not find paste config at %(path)s")
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
+2
View File
@@ -305,6 +305,8 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_list('enabled_apis', ['ec2', 'osapi'],
'list of APIs to enable by default')
DEFINE_string('ec2_host', '$my_ip', 'ip of api server')
DEFINE_string('ec2_dmz_host', '$my_ip', 'internal ip of api server')
DEFINE_integer('ec2_port', 8773, 'cloud controller port')
+1 -1
View File
@@ -59,7 +59,7 @@ class GlanceImageService(service.BaseImageService):
"""Provides storage and retrieval of disk image objects within Glance."""
GLANCE_ONLY_ATTRS = ['size', 'location', 'disk_format',
'container_format']
'container_format', 'checksum']
# NOTE(sirp): Overriding to use _translate_to_service provided by
# BaseImageService
+11
View File
@@ -314,3 +314,14 @@ logging.setLoggerClass(NovaLogger)
def audit(msg, *args, **kwargs):
"""Shortcut for logging to root log with sevrity 'AUDIT'."""
logging.root.log(AUDIT, msg, *args, **kwargs)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
+74 -14
View File
@@ -22,7 +22,6 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import quota
from nova import rpc
from nova.db import base
@@ -34,13 +33,21 @@ LOG = logging.getLogger('nova.network')
class API(base.Base):
"""API for interacting with the network manager."""
def get_floating_ip(self, context, id):
rv = self.db.floating_ip_get(context, id)
return dict(rv.iteritems())
def get_floating_ip_by_ip(self, context, address):
res = self.db.floating_ip_get_by_address(context, address)
return dict(res.iteritems())
def list_floating_ips(self, context):
ips = self.db.floating_ip_get_all_by_project(context,
context.project_id)
return ips
def allocate_floating_ip(self, context):
if quota.allowed_floating_ips(context, 1) < 1:
LOG.warn(_('Quota exceeeded for %s, tried to allocate '
'address'),
context.project_id)
raise quota.QuotaError(_('Address quota exceeded. You cannot '
'allocate any more addresses'))
"""Adds a floating ip to a project."""
# NOTE(vish): We don't know which network host should get the ip
# when we allocate, so just send it to any one. This
# will probably need to move into a network supervisor
@@ -52,6 +59,7 @@ class API(base.Base):
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Removes floating ip with address from a project."""
floating_ip = self.db.floating_ip_get_by_address(context, address)
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
return
@@ -65,8 +73,19 @@ class API(base.Base):
'args': {'floating_address': floating_ip['address']}})
def associate_floating_ip(self, context, floating_ip, fixed_ip,
affect_auto_assigned=False):
if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
ensures floating ip is allocated to the project in context
:param fixed_ip: is either fixed_ip object or a string fixed ip address
:param floating_ip: is a string floating ip address
"""
# NOTE(tr3buchet): i don't like the "either or" argument type
# funcationility but i've left it alone for now
# TODO(tr3buchet): this function needs to be rewritten to move
# the network related db lookups into the network host code
if isinstance(fixed_ip, basestring):
fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
@@ -86,8 +105,6 @@ class API(base.Base):
'(%(project)s)') %
{'address': floating_ip['address'],
'project': context.project_id})
# NOTE(vish): Perhaps we should just pass this on to compute and
# let compute communicate with network.
host = fixed_ip['network']['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
@@ -97,15 +114,58 @@ class API(base.Base):
def disassociate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from fixed ip it is associated with."""
floating_ip = self.db.floating_ip_get_by_address(context, address)
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
return
if not floating_ip.get('fixed_ip'):
raise exception.ApiError('Address is not associated.')
# NOTE(vish): Get the topic from the host name of the network of
# the associated fixed ip.
host = floating_ip['fixed_ip']['network']['host']
rpc.cast(context,
rpc.call(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
{'method': 'disassociate_floating_ip',
'args': {'floating_address': floating_ip['address']}})
def allocate_for_instance(self, context, instance, **kwargs):
"""Allocates all network structures for an instance.
:returns: network info as from get_instance_nw_info() below
"""
args = kwargs
args['instance_id'] = instance['id']
args['project_id'] = instance['project_id']
args['instance_type_id'] = instance['instance_type_id']
return rpc.call(context, FLAGS.network_topic,
{'method': 'allocate_for_instance',
'args': args})
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocates all network structures related to instance."""
args = kwargs
args['instance_id'] = instance['id']
args['project_id'] = instance['project_id']
rpc.cast(context, FLAGS.network_topic,
{'method': 'deallocate_for_instance',
'args': args})
def add_fixed_ip_to_instance(self, context, instance_id, network_id):
"""Adds a fixed ip to instance from specified network."""
args = {'instance_id': instance_id,
'network_id': network_id}
rpc.cast(context, FLAGS.network_topic,
{'method': 'add_fixed_ip_to_instance',
'args': args})
def add_network_to_project(self, context, project_id):
"""Force adds another network to a project."""
rpc.cast(context, FLAGS.network_topic,
{'method': 'add_network_to_project',
'args': {'project_id': project_id}})
def get_instance_nw_info(self, context, instance):
"""Returns all network info related to an instance."""
args = {'instance_id': instance['id'],
'instance_type_id': instance['instance_type_id']}
return rpc.call(context, FLAGS.network_topic,
{'method': 'get_instance_nw_info',
'args': args})
+8 -8
View File
@@ -20,6 +20,7 @@
import calendar
import inspect
import netaddr
import os
from nova import db
@@ -27,7 +28,6 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from IPy import IP
LOG = logging.getLogger("nova.linux_net")
@@ -451,20 +451,20 @@ def floating_forward_rules(floating_ip, fixed_ip):
'-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]
def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
"""Create a vlan and bridge unless they already exist."""
interface = ensure_vlan(vlan_num)
interface = ensure_vlan(vlan_num, bridge_interface)
ensure_bridge(bridge, interface, net_attrs)
@utils.synchronized('ensure_vlan', external=True)
def ensure_vlan(vlan_num):
def ensure_vlan(vlan_num, bridge_interface):
"""Create a vlan unless it already exists."""
interface = 'vlan%s' % vlan_num
if not _device_exists(interface):
LOG.debug(_('Starting VLAN inteface %s'), interface)
_execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD')
_execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num)
_execute('sudo', 'vconfig', 'add', bridge_interface, vlan_num)
_execute('sudo', 'ip', 'link', 'set', interface, 'up')
return interface
@@ -666,7 +666,7 @@ def _host_lease(fixed_ip_ref):
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time,
instance_ref['mac_address'],
fixed_ip_ref['virtual_interface']['address'],
fixed_ip_ref['address'],
instance_ref['hostname'] or '*')
@@ -674,7 +674,7 @@ def _host_lease(fixed_ip_ref):
def _host_dhcp(fixed_ip_ref):
"""Return a host string for an address in dhcp-host format."""
instance_ref = fixed_ip_ref['instance']
return '%s,%s.%s,%s' % (instance_ref['mac_address'],
return '%s,%s.%s,%s' % (fixed_ip_ref['virtual_interface']['address'],
instance_ref['hostname'],
FLAGS.dhcp_domain,
fixed_ip_ref['address'])
@@ -707,7 +707,7 @@ def _dnsmasq_cmd(net):
'--listen-address=%s' % net['gateway'],
'--except-interface=lo',
'--dhcp-range=%s,static,120s' % net['dhcp_start'],
'--dhcp-lease-max=%s' % IP(net['cidr']).len(),
'--dhcp-lease-max=%s' % len(netaddr.IPNetwork(net['cidr'])),
'--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
'--dhcp-script=%s' % FLAGS.dhcpbridge,
'--leasefile-ro']
+525 -283
View File
File diff suppressed because it is too large Load Diff
+2 -2
View File
@@ -33,7 +33,7 @@ FLAGS = flags.FLAGS
FLAGS['vlan_interface'].SetDefault('vmnic0')
def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
"""Create a vlan and bridge unless they already exist."""
# Open vmwareapi session
host_ip = FLAGS.vmwareapi_host_ip
@@ -46,7 +46,7 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
'connection_type=vmwareapi'))
session = VMWareAPISession(host_ip, host_username, host_password,
FLAGS.vmwareapi_api_retry_count)
vlan_interface = FLAGS.vlan_interface
vlan_interface = bridge_interface
# Check if the vlan_interface physical network adapter exists on the host
if not network_utils.check_if_vlan_interface_exists(session,
vlan_interface):
+6 -4
View File
@@ -34,7 +34,7 @@ LOG = logging.getLogger("nova.xenapi_net")
FLAGS = flags.FLAGS
def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
"""Create a vlan and bridge unless they already exist."""
# Open xenapi session
LOG.debug('ENTERING ensure_vlan_bridge in xenapi net')
@@ -56,14 +56,16 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
'other_config': {}}
network_ref = session.call_xenapi('network.create', network_rec)
# 2 - find PIF for VLAN
expr = "field 'device' = '%s' and \
field 'VLAN' = '-1'" % FLAGS.vlan_interface
# NOTE(salvatore-orlando): using double quotes inside single quotes
# as xapi filter only support tokens in double quotes
expr = 'field "device" = "%s" and \
field "VLAN" = "-1"' % bridge_interface
pifs = session.call_xenapi('PIF.get_all_records_where', expr)
pif_ref = None
# Multiple PIF are ok: we are dealing with a pool
if len(pifs) == 0:
raise Exception(
_('Found no PIF for device %s') % FLAGS.vlan_interface)
_('Found no PIF for device %s') % bridge_interface)
# 3 - create vlan for network
for pif_ref in pifs.keys():
session.call_xenapi('VLAN.create',
+28
View File
@@ -0,0 +1,28 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from nova import flags
from nova import log as logging
FLAGS = flags.FLAGS
NOTIFICATIONS = []
def notify(message):
"""Test notifier, stores notifications in memory for unittests."""
NOTIFICATIONS.append(message)
+6
View File
@@ -275,6 +275,11 @@ class FanoutAdapterConsumer(AdapterConsumer):
unique = uuid.uuid4().hex
self.queue = '%s_fanout_%s' % (topic, unique)
self.durable = False
# Fanout creates unique queue names, so we should auto-remove
# them when done, so they're not left around on restart.
# Also, we're the only one that should be consuming. exclusive
# implies auto_delete, so we'll just set that..
self.exclusive = True
LOG.info(_('Created "%(exchange)s" fanout exchange '
'with "%(key)s" routing key'),
dict(exchange=self.exchange, key=self.routing_key))
@@ -355,6 +360,7 @@ class FanoutPublisher(Publisher):
self.exchange = '%s_fanout' % topic
self.queue = '%s_fanout' % topic
self.durable = False
self.auto_delete = True
LOG.info(_('Creating "%(exchange)s" fanout exchange'),
dict(exchange=self.exchange))
super(FanoutPublisher, self).__init__(connection=connection)
+45 -16
View File
@@ -51,6 +51,11 @@ def _call_scheduler(method, context, params=None):
return rpc.call(context, queue, kwargs)
def get_host_list(context):
"""Return a list of hosts associated with this zone."""
return _call_scheduler('get_host_list', context)
def get_zone_list(context):
"""Return a list of zones assoicated with this zone."""
items = _call_scheduler('get_zone_list', context)
@@ -114,7 +119,8 @@ def _process(func, zone):
def call_zone_method(context, method_name, errors_to_ignore=None,
novaclient_collection_name='zones', *args, **kwargs):
novaclient_collection_name='zones', zones=None,
*args, **kwargs):
"""Returns a list of (zone, call_result) objects."""
if not isinstance(errors_to_ignore, (list, tuple)):
# This will also handle the default None
@@ -122,7 +128,9 @@ def call_zone_method(context, method_name, errors_to_ignore=None,
pool = greenpool.GreenPool()
results = []
for zone in db.zone_get_all(context):
if zones is None:
zones = db.zone_get_all(context)
for zone in zones:
try:
nova = novaclient.OpenStack(zone.username, zone.password, None,
zone.api_url)
@@ -162,32 +170,53 @@ def child_zone_helper(zone_list, func):
_wrap_method(_process, func), zone_list)]
def _issue_novaclient_command(nova, zone, collection, method_name, item_id):
def _issue_novaclient_command(nova, zone, collection,
method_name, *args, **kwargs):
"""Use novaclient to issue command to a single child zone.
One of these will be run in parallel for each child zone."""
One of these will be run in parallel for each child zone.
"""
manager = getattr(nova, collection)
result = None
try:
# NOTE(comstud): This is not ideal, but we have to do this based on
# how novaclient is implemented right now.
# 'find' is special cased as novaclient requires kwargs for it to
# filter on a 'get_all'.
# Every other method first needs to do a 'get' on the first argument
# passed, which should be a UUID. If it's 'get' itself that we want,
# we just return the result. Otherwise, we next call the real method
# that's wanted... passing other arguments that may or may not exist.
if method_name in ['find', 'findall']:
try:
result = manager.get(int(item_id))
except ValueError, e:
result = manager.find(name=item_id)
return getattr(manager, method_name)(**kwargs)
except novaclient.NotFound:
url = zone.api_url
LOG.debug(_("%(collection)s.%(method_name)s didn't find "
"anything matching '%(kwargs)s' on '%(url)s'" %
locals()))
return None
args = list(args)
# pop off the UUID to look up
item = args.pop(0)
try:
result = manager.get(item)
except novaclient.NotFound:
url = zone.api_url
LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" %
LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" %
locals()))
return None
if method_name.lower() not in ['get', 'find']:
result = getattr(result, method_name)()
if method_name.lower() != 'get':
# if we're doing something other than 'get', call it passing args.
result = getattr(result, method_name)(*args, **kwargs)
return result
def wrap_novaclient_function(f, collection, method_name, item_id):
"""Appends collection, method_name and item_id to the incoming
def wrap_novaclient_function(f, collection, method_name, *args, **kwargs):
"""Appends collection, method_name and arguments to the incoming
(nova, zone) call from child_zone_helper."""
def inner(nova, zone):
return f(nova, zone, collection, method_name, item_id)
return f(nova, zone, collection, method_name, *args, **kwargs)
return inner
@@ -220,7 +249,7 @@ class reroute_compute(object):
the wrapped method. (This ensures that zone-local code can
continue to use integer IDs).
4. If the item was not found, we delgate the call to a child zone
4. If the item was not found, we delegate the call to a child zone
using the UUID.
"""
def __init__(self, method_name):
+8 -8
View File
@@ -129,8 +129,7 @@ class Scheduler(object):
# Checking instance is running.
if (power_state.RUNNING != instance_ref['state'] or \
'running' != instance_ref['state_description']):
ec2_id = instance_ref['hostname']
raise exception.InstanceNotRunning(instance_id=ec2_id)
raise exception.InstanceNotRunning(instance_id=instance_ref['id'])
# Checing volume node is running when any volumes are mounted
# to the instance.
@@ -168,9 +167,9 @@ class Scheduler(object):
# and dest is not same.
src = instance_ref['host']
if dest == src:
ec2_id = instance_ref['hostname']
raise exception.UnableToMigrateToSelf(instance_id=ec2_id,
host=dest)
raise exception.UnableToMigrateToSelf(
instance_id=instance_ref['id'],
host=dest)
# Checking dst host still has enough capacities.
self.assert_compute_node_has_enough_resources(context,
@@ -245,7 +244,7 @@ class Scheduler(object):
"""
# Getting instance information
ec2_id = instance_ref['hostname']
hostname = instance_ref['hostname']
# Getting host information
service_refs = db.service_get_all_compute_by_host(context, dest)
@@ -256,8 +255,9 @@ class Scheduler(object):
mem_avail = mem_total - mem_used
mem_inst = instance_ref['memory_mb']
if mem_avail <= mem_inst:
reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s "
"(host:%(mem_avail)s <= instance:%(mem_inst)s)")
reason = _("Unable to migrate %(hostname)s to destination: "
"%(dest)s (host:%(mem_avail)s <= instance:"
"%(mem_inst)s)")
raise exception.MigrationError(reason=reason % locals())
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
+31 -6
View File
@@ -93,6 +93,26 @@ class InstanceTypeFilter(HostFilter):
"""Use instance_type to filter hosts."""
return (self._full_name(), instance_type)
def _satisfies_extra_specs(self, capabilities, instance_type):
"""Check that the capabilities provided by the compute service
satisfy the extra specs associated with the instance type"""
if 'extra_specs' not in instance_type:
return True
# Note(lorinh): For now, we are just checking exact matching on the
# values. Later on, we want to handle numerical
# values so we can represent things like number of GPU cards
try:
for key, value in instance_type['extra_specs'].iteritems():
if capabilities[key] != value:
return False
except KeyError:
return False
return True
def filter_hosts(self, zone_manager, query):
"""Return a list of hosts that can create instance_type."""
instance_type = query
@@ -103,7 +123,11 @@ class InstanceTypeFilter(HostFilter):
disk_bytes = capabilities['disk_available']
spec_ram = instance_type['memory_mb']
spec_disk = instance_type['local_gb']
if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
extra_specs = instance_type['extra_specs']
if host_ram_mb >= spec_ram and \
disk_bytes >= spec_disk and \
self._satisfies_extra_specs(capabilities, instance_type):
selected_hosts.append((host, capabilities))
return selected_hosts
@@ -227,8 +251,7 @@ class JsonFilter(HostFilter):
required_disk = instance_type['local_gb']
query = ['and',
['>=', '$compute.host_memory_free', required_ram],
['>=', '$compute.disk_available', required_disk],
]
['>=', '$compute.disk_available', required_disk]]
return (self._full_name(), json.dumps(query))
def _parse_string(self, string, host, services):
@@ -305,8 +328,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
'instance_type': <InstanceType dict>}
"""
def filter_hosts(self, num, request_spec):
def filter_hosts(self, topic, request_spec, hosts=None):
"""Filter the full host list (from the ZoneManager)"""
filter_name = request_spec.get('filter', None)
host_filter = choose_host_filter(filter_name)
@@ -317,8 +341,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
name, query = host_filter.instance_type_to_filter(instance_type)
return host_filter.filter_hosts(self.zone_manager, query)
def weigh_hosts(self, num, request_spec, hosts):
def weigh_hosts(self, topic, request_spec, hosts):
"""Derived classes must override this method and return
a lists of hosts in [{weight, hostname}] format.
"""
return [dict(weight=1, hostname=host) for host, caps in hosts]
return [dict(weight=1, hostname=hostname, capabilities=caps)
for hostname, caps in hosts]
+32 -14
View File
@@ -48,25 +48,43 @@ def noop_cost_fn(host):
return 1
flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
'How much weight to give the fill-first cost function')
def fill_first_cost_fn(host):
def compute_fill_first_cost_fn(host):
"""Prefer hosts that have less ram available, filter_hosts will exclude
hosts that don't have enough ram"""
hostname, caps = host
free_mem = caps['compute']['host_memory_free']
free_mem = caps['host_memory_free']
return free_mem
class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
def get_cost_fns(self):
def __init__(self, *args, **kwargs):
self.cost_fns_cache = {}
super(LeastCostScheduler, self).__init__(*args, **kwargs)
def get_cost_fns(self, topic):
"""Returns a list of tuples containing weights and cost functions to
use for weighing hosts
"""
if topic in self.cost_fns_cache:
return self.cost_fns_cache[topic]
cost_fns = []
for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
if '.' in cost_fn_str:
short_name = cost_fn_str.split('.')[-1]
else:
short_name = cost_fn_str
cost_fn_str = "%s.%s.%s" % (
__name__, self.__class__.__name__, short_name)
if not (short_name.startswith('%s_' % topic) or
short_name.startswith('noop')):
continue
try:
# NOTE(sirp): import_class is somewhat misnamed since it can
@@ -84,23 +102,23 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
cost_fns.append((weight, cost_fn))
self.cost_fns_cache[topic] = cost_fns
return cost_fns
def weigh_hosts(self, num, request_spec, hosts):
def weigh_hosts(self, topic, request_spec, hosts):
"""Returns a list of dictionaries of form:
[ {weight: weight, hostname: hostname} ]"""
[ {weight: weight, hostname: hostname, capabilities: capabs} ]
"""
# FIXME(sirp): weigh_hosts should handle more than just instances
hostnames = [hostname for hostname, caps in hosts]
cost_fns = self.get_cost_fns()
cost_fns = self.get_cost_fns(topic)
costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
weighted = []
weight_log = []
for cost, hostname in zip(costs, hostnames):
for cost, (hostname, caps) in zip(costs, hosts):
weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
weight_dict = dict(weight=cost, hostname=hostname)
weight_dict = dict(weight=cost, hostname=hostname,
capabilities=caps)
weighted.append(weight_dict)
LOG.debug(_("Weighted Costs => %s") % weight_log)
@@ -127,7 +145,8 @@ def weighted_sum(domain, weighted_fns, normalize=True):
weighted_fns - list of weights and functions like:
[(weight, objective-functions)]
Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts)
Returns an unsorted list of scores. To pair with hosts do:
zip(scores, hosts)
"""
# Table of form:
# { domain1: [score1, score2, ..., scoreM]
@@ -150,7 +169,6 @@ def weighted_sum(domain, weighted_fns, normalize=True):
domain_scores = []
for idx in sorted(score_table):
elem_score = sum(score_table[idx])
elem = domain[idx]
domain_scores.append(elem_score)
return domain_scores
+4
View File
@@ -56,6 +56,10 @@ class SchedulerManager(manager.Manager):
"""Poll child zones periodically to get status."""
self.zone_manager.ping(context)
def get_host_list(self, context=None):
"""Get a list of hosts from the ZoneManager."""
return self.zone_manager.get_host_list()
def get_zone_list(self, context=None):
"""Get a list of zones from the ZoneManager."""
return self.zone_manager.get_zone_list()
+152 -42
View File
@@ -33,6 +33,7 @@ from nova import flags
from nova import log as logging
from nova import rpc
from nova.compute import api as compute_api
from nova.scheduler import api
from nova.scheduler import driver
@@ -48,14 +49,25 @@ class InvalidBlob(exception.NovaException):
class ZoneAwareScheduler(driver.Scheduler):
"""Base class for creating Zone Aware Schedulers."""
def _call_zone_method(self, context, method, specs):
def _call_zone_method(self, context, method, specs, zones):
"""Call novaclient zone method. Broken out for testing."""
return api.call_zone_method(context, method, specs=specs)
return api.call_zone_method(context, method, specs=specs, zones=zones)
def _provision_resource_locally(self, context, item, instance_id, kwargs):
def _provision_resource_locally(self, context, build_plan_item,
request_spec, kwargs):
"""Create the requested resource in this Zone."""
host = item['hostname']
host = build_plan_item['hostname']
base_options = request_spec['instance_properties']
# TODO(sandy): I guess someone needs to add block_device_mapping
# support at some point? Also, OS API has no concept of security
# groups.
instance = compute_api.API().create_db_entry_for_new_instance(context,
base_options, None, [])
instance_id = instance['id']
kwargs['instance_id'] = instance_id
rpc.cast(context,
db.queue_get_for(context, "compute", host),
{"method": "run_instance",
@@ -115,8 +127,8 @@ class ZoneAwareScheduler(driver.Scheduler):
nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
child_blob, reservation_id=reservation_id)
def _provision_resource_from_blob(self, context, item, instance_id,
request_spec, kwargs):
def _provision_resource_from_blob(self, context, build_plan_item,
instance_id, request_spec, kwargs):
"""Create the requested resource locally or in a child zone
based on what is stored in the zone blob info.
@@ -132,12 +144,12 @@ class ZoneAwareScheduler(driver.Scheduler):
request."""
host_info = None
if "blob" in item:
if "blob" in build_plan_item:
# Request was passed in from above. Is it for us?
host_info = self._decrypt_blob(item['blob'])
elif "child_blob" in item:
host_info = self._decrypt_blob(build_plan_item['blob'])
elif "child_blob" in build_plan_item:
# Our immediate child zone provided this info ...
host_info = item
host_info = build_plan_item
if not host_info:
raise InvalidBlob()
@@ -147,19 +159,46 @@ class ZoneAwareScheduler(driver.Scheduler):
self._ask_child_zone_to_create_instance(context, host_info,
request_spec, kwargs)
else:
self._provision_resource_locally(context, host_info,
instance_id, kwargs)
self._provision_resource_locally(context, host_info, request_spec,
kwargs)
def _provision_resource(self, context, item, instance_id, request_spec,
kwargs):
def _provision_resource(self, context, build_plan_item, instance_id,
request_spec, kwargs):
"""Create the requested resource in this Zone or a child zone."""
if "hostname" in item:
self._provision_resource_locally(context, item, instance_id,
kwargs)
if "hostname" in build_plan_item:
self._provision_resource_locally(context, build_plan_item,
request_spec, kwargs)
return
self._provision_resource_from_blob(context, item, instance_id,
request_spec, kwargs)
self._provision_resource_from_blob(context, build_plan_item,
instance_id, request_spec, kwargs)
def _adjust_child_weights(self, child_results, zones):
"""Apply the Scale and Offset values from the Zone definition
to adjust the weights returned from the child zones. Alters
child_results in place.
"""
for zone_id, result in child_results:
if not result:
continue
assert isinstance(zone_id, int)
for zone_rec in zones:
if zone_rec['id'] != zone_id:
continue
for item in result:
try:
offset = zone_rec['weight_offset']
scale = zone_rec['weight_scale']
raw_weight = item['weight']
cooked_weight = offset + scale * raw_weight
item['weight'] = cooked_weight
item['raw_weight'] = raw_weight
except KeyError:
LOG.exception(_("Bad child zone scaling values "
"for Zone: %(zone_id)s") % locals())
def schedule_run_instance(self, context, instance_id, request_spec,
*args, **kwargs):
@@ -180,18 +219,22 @@ class ZoneAwareScheduler(driver.Scheduler):
request_spec, kwargs)
return None
num_instances = request_spec.get('num_instances', 1)
LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
locals())
# Create build plan and provision ...
build_plan = self.select(context, request_spec)
if not build_plan:
raise driver.NoValidHost(_('No hosts were available'))
for num in xrange(request_spec['num_instances']):
for num in xrange(num_instances):
if not build_plan:
break
item = build_plan.pop(0)
self._provision_resource(context, item, instance_id, request_spec,
kwargs)
build_plan_item = build_plan.pop(0)
self._provision_resource(context, build_plan_item, instance_id,
request_spec, kwargs)
# Returning None short-circuits the routing to Compute (since
# we've already done it here)
@@ -224,23 +267,43 @@ class ZoneAwareScheduler(driver.Scheduler):
raise NotImplemented(_("Zone Aware Scheduler only understands "
"Compute nodes (for now)"))
#TODO(sandy): how to infer this from OS API params?
num_instances = 1
num_instances = request_spec.get('num_instances', 1)
instance_type = request_spec['instance_type']
# Filter local hosts based on requirements ...
host_list = self.filter_hosts(num_instances, request_spec)
weighted = []
host_list = None
# TODO(sirp): weigh_hosts should also be a function of 'topic' or
# resources, so that we can apply different objective functions to it
for i in xrange(num_instances):
# Filter local hosts based on requirements ...
#
# The first pass through here will pass 'None' as the
# host_list.. which tells the filter to build the full
# list of hosts.
# On a 2nd pass, the filter can modify the host_list with
# any updates it needs to make based on resources that
# may have been consumed from a previous build..
host_list = self.filter_hosts(topic, request_spec, host_list)
if not host_list:
LOG.warn(_("Filter returned no hosts after processing "
"%(i)d of %(num_instances)d instances") % locals())
break
# then weigh the selected hosts.
# weighted = [{weight=weight, name=hostname}, ...]
weighted = self.weigh_hosts(num_instances, request_spec, host_list)
# then weigh the selected hosts.
# weighted = [{weight=weight, hostname=hostname,
# capabilities=capabs}, ...]
weights = self.weigh_hosts(topic, request_spec, host_list)
weights.sort(key=operator.itemgetter('weight'))
best_weight = weights[0]
weighted.append(best_weight)
self.consume_resources(topic, best_weight['capabilities'],
instance_type)
# Next, tack on the best weights from the child zones ...
json_spec = json.dumps(request_spec)
all_zones = db.zone_get_all(context)
child_results = self._call_zone_method(context, "select",
specs=json_spec)
specs=json_spec, zones=all_zones)
self._adjust_child_weights(child_results, all_zones)
for child_zone, result in child_results:
for weighting in result:
# Remember the child_zone so we can get back to
@@ -254,18 +317,65 @@ class ZoneAwareScheduler(driver.Scheduler):
weighted.sort(key=operator.itemgetter('weight'))
return weighted
def filter_hosts(self, num, request_spec):
"""Derived classes must override this method and return
a list of hosts in [(hostname, capability_dict)] format.
def compute_filter(self, hostname, capabilities, request_spec):
"""Return whether or not we can schedule to this compute node.
Derived classes should override this and return True if the host
is acceptable for scheduling.
"""
# NOTE(sirp): The default logic is the equivalent to AllHostsFilter
service_states = self.zone_manager.service_states
return [(host, services)
for host, services in service_states.iteritems()]
instance_type = request_spec['instance_type']
requested_mem = instance_type['memory_mb'] * 1024 * 1024
return capabilities['host_memory_free'] >= requested_mem
def weigh_hosts(self, num, request_spec, hosts):
def filter_hosts(self, topic, request_spec, host_list=None):
"""Return a list of hosts which are acceptable for scheduling.
Return value should be a list of (hostname, capability_dict)s.
Derived classes may override this, but may find the
'<topic>_filter' function more appropriate.
"""
def _default_filter(self, hostname, capabilities, request_spec):
"""Default filter function if there's no <topic>_filter"""
# NOTE(sirp): The default logic is the equivalent to
# AllHostsFilter
return True
filter_func = getattr(self, '%s_filter' % topic, _default_filter)
if host_list is None:
first_run = True
host_list = self.zone_manager.service_states.iteritems()
else:
first_run = False
filtered_hosts = []
for host, services in host_list:
if first_run:
if topic not in services:
continue
services = services[topic]
if filter_func(host, services, request_spec):
filtered_hosts.append((host, services))
return filtered_hosts
def weigh_hosts(self, topic, request_spec, hosts):
"""Derived classes may override this to provide more sophisticated
scheduling objectives
"""
# NOTE(sirp): The default logic is the same as the NoopCostFunction
return [dict(weight=1, hostname=host) for host, caps in hosts]
return [dict(weight=1, hostname=hostname, capabilities=capabilities)
for hostname, capabilities in hosts]
def compute_consume(self, capabilities, instance_type):
"""Consume compute resources for selected host"""
requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
capabilities['host_memory_free'] -= requested_mem
def consume_resources(self, topic, capabilities, instance_type):
"""Consume resources for a specific host. 'host' is a tuple
of the hostname and the services"""
consume_func = getattr(self, '%s_consume' % topic, None)
if not consume_func:
return
consume_func(capabilities, instance_type)
+46
View File
@@ -115,6 +115,18 @@ class ZoneManager(object):
"""Return the list of zones we know about."""
return [zone.to_dict() for zone in self.zone_states.values()]
def get_host_list(self):
"""Returns a list of dicts for each host that the Zone Manager
knows about. Each dict contains the host_name and the service
for that host.
"""
all_hosts = self.service_states.keys()
ret = []
for host in self.service_states:
for svc in self.service_states[host]:
ret.append({"service": svc, "host_name": host})
return ret
def get_zone_capabilities(self, context):
"""Roll up all the individual host info to generic 'service'
capabilities. Each capability is aggregated into
@@ -125,15 +137,30 @@ class ZoneManager(object):
# But it's likely to change once we understand what the Best-Match
# code will need better.
combined = {} # { <service>_<cap> : (min, max), ... }
stale_host_services = {} # { host1 : [svc1, svc2], host2 :[svc1]}
for host, host_dict in hosts_dict.iteritems():
for service_name, service_dict in host_dict.iteritems():
if not service_dict.get("enabled", True):
# Service is disabled; do no include it
continue
#Check if the service capabilities became stale
if self.host_service_caps_stale(host, service_name):
if host not in stale_host_services:
stale_host_services[host] = [] # Adding host key once
stale_host_services[host].append(service_name)
continue
for cap, value in service_dict.iteritems():
if cap == "timestamp": # Timestamp is not needed
continue
key = "%s_%s" % (service_name, cap)
min_value, max_value = combined.get(key, (value, value))
min_value = min(min_value, value)
max_value = max(max_value, value)
combined[key] = (min_value, max_value)
# Delete the expired host services
self.delete_expired_host_services(stale_host_services)
return combined
def _refresh_from_db(self, context):
@@ -172,5 +199,24 @@ class ZoneManager(object):
logging.debug(_("Received %(service_name)s service update from "
"%(host)s: %(capabilities)s") % locals())
service_caps = self.service_states.get(host, {})
capabilities["timestamp"] = utils.utcnow() # Reported time
service_caps[service_name] = capabilities
self.service_states[host] = service_caps
def host_service_caps_stale(self, host, service):
"""Check if host service capabilites are not recent enough."""
allowed_time_diff = FLAGS.periodic_interval * 3
caps = self.service_states[host][service]
if (utils.utcnow() - caps["timestamp"]) <= \
datetime.timedelta(seconds=allowed_time_diff):
return False
return True
def delete_expired_host_services(self, host_services_dict):
"""Delete all the inactive host services information."""
for host, services in host_services_dict.iteritems():
service_caps = self.service_states[host]
for service in services:
del service_caps[service]
if len(service_caps) == 0: # Delete host if no services
del self.service_states[host]
+101 -76
View File
@@ -19,10 +19,12 @@
"""Generic Node baseclass for all workers that run on hosts."""
import greenlet
import inspect
import multiprocessing
import os
import greenlet
from eventlet import greenthread
from nova import context
@@ -36,6 +38,8 @@ from nova import version
from nova import wsgi
LOG = logging.getLogger('nova.service')
FLAGS = flags.FLAGS
flags.DEFINE_integer('report_interval', 10,
'seconds between nodes reporting state to datastore',
@@ -53,6 +57,63 @@ flags.DEFINE_string('api_paste_config', "api-paste.ini",
'File name for the paste.deploy config for nova-api')
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self._services = []
@staticmethod
def run_service(service):
"""Start and wait for a service to finish.
:param service: Service to run and wait for.
:returns: None
"""
service.start()
try:
service.wait()
except KeyboardInterrupt:
service.stop()
def launch_service(self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
process = multiprocessing.Process(target=self.run_service,
args=(service,))
process.start()
self._services.append(process)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
for service in self._services:
if service.is_alive():
service.terminate()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
for service in self._services:
service.join()
class Service(object):
"""Base class for workers that run on hosts."""
@@ -232,45 +293,54 @@ class Service(object):
logging.exception(_('model server went away'))
class WsgiService(object):
"""Base class for WSGI based services.
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
For each api you define, you must also define these flags:
:<api>_listen: The address on which to listen
:<api>_listen_port: The port on which to listen
def __init__(self, name, loader=None):
"""Initialize, but do not start the WSGI service.
"""
:param name: The name of the WSGI service given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
def __init__(self, conf, apis):
self.conf = conf
self.apis = apis
self.wsgi_app = None
"""
self.name = name
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0")
self.port = getattr(FLAGS, '%s_listen_port' % name, 0)
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port)
def start(self):
self.wsgi_app = _run_wsgi(self.conf, self.apis)
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
self.server.start()
self.port = self.server.port
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
self.wsgi_app.wait()
"""Wait for the service to stop serving this API.
def get_socket_info(self, api_name):
"""Returns the (host, port) that an API was started on."""
return self.wsgi_app.socket_info[api_name]
:returns: None
class ApiService(WsgiService):
"""Class for our nova-api service."""
@classmethod
def create(cls, conf=None):
if not conf:
conf = wsgi.paste_config_file(FLAGS.api_paste_config)
if not conf:
message = (_('No paste configuration found for: %s'),
FLAGS.api_paste_config)
raise exception.Error(message)
api_endpoints = ['ec2', 'osapi']
service = cls(conf, api_endpoints)
return service
"""
self.server.wait()
def serve(*services):
@@ -302,48 +372,3 @@ def serve(*services):
def wait():
while True:
greenthread.sleep(5)
def serve_wsgi(cls, conf=None):
try:
service = cls.create(conf)
except Exception:
logging.exception('in WsgiService.create()')
raise
finally:
# After we've loaded up all our dynamic bits, check
# whether we should print help
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
FLAGS.ParseNewFlags()
service.start()
return service
def _run_wsgi(paste_config_file, apis):
logging.debug(_('Using paste.deploy config at: %s'), paste_config_file)
apps = []
for api in apis:
config = wsgi.load_paste_configuration(paste_config_file, api)
if config is None:
logging.debug(_('No paste configuration for app: %s'), api)
continue
logging.debug(_('App Config: %(api)s\n%(config)r') % locals())
logging.info(_('Running %s API'), api)
app = wsgi.load_paste_app(paste_config_file, api)
apps.append((app,
getattr(FLAGS, '%s_listen_port' % api),
getattr(FLAGS, '%s_listen' % api),
api))
if len(apps) == 0:
logging.error(_('No known API applications configured in %s.'),
paste_config_file)
return
server = wsgi.Server()
for app in apps:
server.start(*app)
return server
+19 -23
View File
@@ -30,15 +30,17 @@ import uuid
import unittest
import mox
import nose.plugins.skip
import shutil
import stubout
from eventlet import greenthread
from nova import fakerabbit
from nova import flags
from nova import log
from nova import rpc
from nova import utils
from nova import service
from nova import wsgi
from nova.virt import fake
@@ -48,6 +50,22 @@ flags.DEFINE_string('sqlite_clean_db', 'clean.sqlite',
flags.DEFINE_bool('fake_tests', True,
'should we use everything for testing')
LOG = log.getLogger('nova.tests')
class skip_test(object):
"""Decorator that skips a test."""
def __init__(self, msg):
self.message = msg
def __call__(self, func):
def _skipper(*args, **kw):
"""Wrapped skipper function."""
raise nose.SkipTest(self.message)
_skipper.__name__ = func.__name__
_skipper.__doc__ = func.__doc__
return _skipper
def skip_if_fake(func):
"""Decorator that skips a test if running in fake mode."""
@@ -81,7 +99,6 @@ class TestCase(unittest.TestCase):
self.injected = []
self._services = []
self._monkey_patch_attach()
self._monkey_patch_wsgi()
self._original_flags = FLAGS.FlagValuesDict()
rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size)
@@ -107,7 +124,6 @@ class TestCase(unittest.TestCase):
# Reset our monkey-patches
rpc.Consumer.attach_to_eventlet = self.original_attach
wsgi.Server.start = self.original_start
# Stop any timers
for x in self.injected:
@@ -163,26 +179,6 @@ class TestCase(unittest.TestCase):
_wrapped.func_name = self.original_attach.func_name
rpc.Consumer.attach_to_eventlet = _wrapped
def _monkey_patch_wsgi(self):
"""Allow us to kill servers spawned by wsgi.Server."""
self.original_start = wsgi.Server.start
@functools.wraps(self.original_start)
def _wrapped_start(inner_self, *args, **kwargs):
original_spawn_n = inner_self.pool.spawn_n
@functools.wraps(original_spawn_n)
def _wrapped_spawn_n(*args, **kwargs):
rv = greenthread.spawn(*args, **kwargs)
self._services.append(rv)
inner_self.pool.spawn_n = _wrapped_spawn_n
self.original_start(inner_self, *args, **kwargs)
inner_self.pool.spawn_n = original_spawn_n
_wrapped_start.func_name = self.original_start.func_name
wsgi.Server.start = _wrapped_start
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
+17 -9
View File
@@ -42,6 +42,7 @@ def setup():
from nova import context
from nova import flags
from nova import db
from nova.db import migration
from nova.network import manager as network_manager
from nova.tests import fake_flags
@@ -50,17 +51,24 @@ def setup():
testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
if os.path.exists(testdb):
os.unlink(testdb)
return
migration.db_sync()
ctxt = context.get_admin_context()
network_manager.VlanManager().create_networks(ctxt,
FLAGS.fixed_range,
FLAGS.num_networks,
FLAGS.network_size,
FLAGS.fixed_range_v6,
FLAGS.vlan_start,
FLAGS.vpn_start,
)
network = network_manager.VlanManager()
bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface
network.create_networks(ctxt,
label='test',
cidr=FLAGS.fixed_range,
num_networks=FLAGS.num_networks,
network_size=FLAGS.network_size,
cidr_v6=FLAGS.fixed_range_v6,
gateway_v6=FLAGS.gateway_v6,
bridge=FLAGS.flat_network_bridge,
bridge_interface=bridge_interface,
vpn_start=FLAGS.vpn_start,
vlan_start=FLAGS.vlan_start)
for net in db.network_get_all(ctxt):
network.set_network_host(ctxt, net['id'])
cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)
shutil.copyfile(testdb, cleandb)
+19
View File
@@ -0,0 +1,19 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Openstack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
from nova.tests import *
+3
View File
@@ -15,6 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
from nova.tests import *
import webob.dec
from nova import test
@@ -0,0 +1,15 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@@ -0,0 +1,189 @@
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import webob
from nova import context
from nova import db
from nova import test
from nova import network
from nova.tests.api.openstack import fakes
from nova.api.openstack.contrib.floating_ips import FloatingIPController
from nova.api.openstack.contrib.floating_ips import _translate_floating_ip_view
def network_api_get_floating_ip(self, context, id):
return {'id': 1, 'address': '10.10.10.10',
'fixed_ip': {'address': '11.0.0.1'}}
def network_api_list_floating_ips(self, context):
return [{'id': 1,
'address': '10.10.10.10',
'instance': {'id': 11},
'fixed_ip': {'address': '10.0.0.1'}},
{'id': 2,
'address': '10.10.10.11'}]
def network_api_allocate(self, context):
return '10.10.10.10'
def network_api_release(self, context, address):
pass
def network_api_associate(self, context, floating_ip, fixed_ip):
pass
def network_api_disassociate(self, context, floating_address):
pass
class FloatingIpTest(test.TestCase):
address = "10.10.10.10"
def _create_floating_ip(self):
"""Create a floating ip object."""
host = "fake_host"
return db.floating_ip_create(self.context,
{'address': self.address,
'host': host})
def _delete_floating_ip(self):
db.floating_ip_destroy(self.context, self.address)
def setUp(self):
super(FloatingIpTest, self).setUp()
self.controller = FloatingIPController()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
self.stubs.Set(network.api.API, "get_floating_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "list_floating_ips",
network_api_list_floating_ips)
self.stubs.Set(network.api.API, "allocate_floating_ip",
network_api_allocate)
self.stubs.Set(network.api.API, "release_floating_ip",
network_api_release)
self.stubs.Set(network.api.API, "associate_floating_ip",
network_api_associate)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
self.context = context.get_admin_context()
self._create_floating_ip()
def tearDown(self):
self.stubs.UnsetAll()
self._delete_floating_ip()
super(FloatingIpTest, self).tearDown()
def test_translate_floating_ip_view(self):
floating_ip_address = self._create_floating_ip()
floating_ip = db.floating_ip_get_by_address(self.context,
floating_ip_address)
view = _translate_floating_ip_view(floating_ip)
self.assertTrue('floating_ip' in view)
self.assertTrue(view['floating_ip']['id'])
self.assertEqual(view['floating_ip']['ip'], self.address)
self.assertEqual(view['floating_ip']['fixed_ip'], None)
self.assertEqual(view['floating_ip']['instance_id'], None)
def test_floating_ips_list(self):
req = webob.Request.blank('/v1.1/os-floating-ips')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
response = {'floating_ips': [{'floating_ip': {'instance_id': 11,
'ip': '10.10.10.10',
'fixed_ip': '10.0.0.1',
'id': 1}},
{'floating_ip': {'instance_id': None,
'ip': '10.10.10.11',
'fixed_ip': None,
'id': 2}}]}
self.assertEqual(res_dict, response)
def test_floating_ip_show(self):
req = webob.Request.blank('/v1.1/os-floating-ips/1')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
self.assertEqual(res_dict['floating_ip']['id'], 1)
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertEqual(res_dict['floating_ip']['fixed_ip'], '11.0.0.1')
self.assertEqual(res_dict['floating_ip']['instance_id'], None)
def test_floating_ip_allocate(self):
req = webob.Request.blank('/v1.1/os-floating-ips')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
print res
self.assertEqual(res.status_int, 200)
ip = json.loads(res.body)['allocated']
expected = {
"id": 1,
"floating_ip": '10.10.10.10'}
self.assertEqual(ip, expected)
def test_floating_ip_release(self):
req = webob.Request.blank('/v1.1/os-floating-ips/1')
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
actual = json.loads(res.body)['released']
expected = {
"id": 1,
"floating_ip": '10.10.10.10'}
self.assertEqual(actual, expected)
def test_floating_ip_associate(self):
body = dict(associate_address=dict(fixed_ip='1.2.3.4'))
req = webob.Request.blank('/v1.1/os-floating-ips/1/associate')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
actual = json.loads(res.body)['associated']
expected = {
"floating_ip_id": '1',
"floating_ip": "10.10.10.10",
"fixed_ip": "1.2.3.4"}
self.assertEqual(actual, expected)
def test_floating_ip_disassociate(self):
req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
ip = json.loads(res.body)['disassociated']
expected = {
"floating_ip": '10.10.10.10',
"fixed_ip": '11.0.0.1'}
self.assertEqual(ip, expected)
@@ -0,0 +1,117 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import webob
from nova import compute
from nova import context
from nova import test
from nova.tests.api.openstack import fakes
last_add_fixed_ip = (None, None)
last_remove_fixed_ip = (None, None)
def compute_api_add_fixed_ip(self, context, instance_id, network_id):
global last_add_fixed_ip
last_add_fixed_ip = (instance_id, network_id)
def compute_api_remove_fixed_ip(self, context, instance_id, address):
global last_remove_fixed_ip
last_remove_fixed_ip = (instance_id, address)
class FixedIpTest(test.TestCase):
def setUp(self):
super(FixedIpTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
self.stubs.Set(compute.api.API, "add_fixed_ip",
compute_api_add_fixed_ip)
# TODO(Vek): Fails until remove_fixed_ip() added
# self.stubs.Set(compute.api.API, "remove_fixed_ip",
# compute_api_remove_fixed_ip)
self.context = context.get_admin_context()
def tearDown(self):
self.stubs.UnsetAll()
super(FixedIpTest, self).tearDown()
def test_add_fixed_ip(self):
global last_add_fixed_ip
last_add_fixed_ip = (None, None)
body = dict(addFixedIp=dict(networkId='test_net'))
req = webob.Request.blank('/v1.1/servers/test_inst/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 202)
self.assertEqual(last_add_fixed_ip, ('test_inst', 'test_net'))
def test_add_fixed_ip_no_network(self):
global last_add_fixed_ip
last_add_fixed_ip = (None, None)
body = dict(addFixedIp=dict())
req = webob.Request.blank('/v1.1/servers/test_inst/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 422)
self.assertEqual(last_add_fixed_ip, (None, None))
def test_remove_fixed_ip(self):
global last_remove_fixed_ip
last_remove_fixed_ip = (None, None)
body = dict(removeFixedIp=dict(address='10.10.10.1'))
req = webob.Request.blank('/v1.1/servers/test_inst/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
# TODO(Vek): Fails until remove_fixed_ip() added
# self.assertEqual(resp.status_int, 202)
# self.assertEqual(last_remove_fixed_ip, ('test_inst', '10.10.10.1'))
def test_remove_fixed_ip_no_address(self):
global last_remove_fixed_ip
last_remove_fixed_ip = (None, None)
body = dict(removeFixedIp=dict())
req = webob.Request.blank('/v1.1/servers/test_inst/action')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 422)
self.assertEqual(last_remove_fixed_ip, (None, None))
@@ -0,0 +1,198 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import unittest
import webob
import os.path
from nova import flags
from nova.api import openstack
from nova.api.openstack import auth
from nova.api.openstack import extensions
from nova.tests.api.openstack import fakes
import nova.wsgi
FLAGS = flags.FLAGS
def return_create_flavor_extra_specs(context, flavor_id, extra_specs):
return stub_flavor_extra_specs()
def return_flavor_extra_specs(context, flavor_id):
return stub_flavor_extra_specs()
def return_flavor_extra_specs(context, flavor_id):
return stub_flavor_extra_specs()
def return_empty_flavor_extra_specs(context, flavor_id):
return {}
def delete_flavor_extra_specs(context, flavor_id, key):
pass
def stub_flavor_extra_specs():
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
return specs
class FlavorsExtraSpecsTest(unittest.TestCase):
def setUp(self):
super(FlavorsExtraSpecsTest, self).setUp()
FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
"extensions")
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
self.mware = auth.AuthMiddleware(
extensions.ExtensionMiddleware(
openstack.APIRouterV11()))
def tearDown(self):
self.stubs.UnsetAll()
super(FlavorsExtraSpecsTest, self).tearDown()
def test_index(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_flavor_extra_specs)
request = webob.Request.blank('/flavors/1/os-extra_specs')
res = request.get_response(self.mware)
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_index_no_data(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_empty_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs')
res = req.get_response(self.mware)
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual(0, len(res_dict['extra_specs']))
def test_show(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key5')
res = req.get_response(self.mware)
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value5', res_dict['key5'])
def test_show_spec_not_found(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_empty_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key6')
res = req.get_response(self.mware)
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
def test_delete(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_delete',
delete_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key5')
req.method = 'DELETE'
res = req.get_response(self.mware)
self.assertEqual(200, res.status_int)
def test_create(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs')
req.method = 'POST'
req.body = '{"extra_specs": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_create_empty_body(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs')
req.method = 'POST'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
self.assertEqual(400, res.status_int)
def test_update_item(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
req.method = 'PUT'
req.body = '{"key1": "value1"}'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
self.assertEqual(200, res.status_int)
self.assertEqual('application/json', res.headers['Content-Type'])
res_dict = json.loads(res.body)
self.assertEqual('value1', res_dict['key1'])
def test_update_item_empty_body(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
self.assertEqual(400, res.status_int)
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
req.method = 'PUT'
req.body = '{"key1": "value1", "key2": "value2"}'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
self.assertEqual(400, res.status_int)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = webob.Request.blank('/flavors/1/os-extra_specs/bad')
req.method = 'PUT'
req.body = '{"key1": "value1"}'
req.headers["content-type"] = "application/json"
res = req.get_response(self.mware)
self.assertEqual(400, res.status_int)
+13 -3
View File
@@ -16,7 +16,6 @@
# under the License.
import copy
import json
import random
import string
@@ -29,11 +28,11 @@ from glance.common import exception as glance_exc
from nova import context
from nova import exception as exc
from nova import flags
from nova import utils
import nova.api.openstack.auth
from nova.api import openstack
from nova.api.openstack import auth
from nova.api.openstack import extensions
from nova.api.openstack import versions
from nova.api.openstack import limits
from nova.auth.manager import User, Project
@@ -82,7 +81,8 @@ def wsgi_app(inner_app10=None, inner_app11=None):
api10 = openstack.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app10)))
api11 = openstack.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app11)))
limits.RateLimitingMiddleware(
extensions.ExtensionMiddleware(inner_app11))))
mapper['/v1.0'] = api10
mapper['/v1.1'] = api11
mapper['/'] = openstack.FaultWrapper(versions.Versions())
@@ -147,6 +147,16 @@ def stub_out_compute_api_snapshot(stubs):
stubs.Set(nova.compute.API, 'snapshot', snapshot)
def stub_out_compute_api_backup(stubs):
def backup(self, context, instance_id, name, backup_type, rotation,
extra_properties=None):
props = dict(instance_id=instance_id, instance_ref=instance_id,
backup_type=backup_type, rotation=rotation)
props.update(extra_properties or {})
return dict(id='123', status='ACTIVE', name=name, properties=props)
stubs.Set(nova.compute.API, 'backup', backup)
def stub_out_glance_add_image(stubs, sent_to_glance):
"""
We return the metadata sent to glance by modifying the sent_to_glance dict
+9 -3
View File
@@ -161,12 +161,12 @@ class PaginationParamsTest(test.TestCase):
def test_no_params(self):
""" Test no params. """
req = Request.blank('/')
self.assertEqual(common.get_pagination_params(req), (0, 0))
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
""" Test valid marker param. """
req = Request.blank('/?marker=1')
self.assertEqual(common.get_pagination_params(req), (1, 0))
self.assertEqual(common.get_pagination_params(req), {'marker': 1})
def test_invalid_marker(self):
""" Test invalid marker param. """
@@ -177,10 +177,16 @@ class PaginationParamsTest(test.TestCase):
def test_valid_limit(self):
""" Test valid limit param. """
req = Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), (0, 10))
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
""" Test invalid limit param. """
req = Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
""" Test valid limit and marker parameters. """
req = Request.blank('/?limit=20&marker=40')
self.assertEqual(common.get_pagination_params(req),
{'marker': 40, 'limit': 20})
+31 -35
View File
@@ -87,6 +87,19 @@ class FlavorsTest(test.TestCase):
]
self.assertEqual(flavors, expected)
def test_get_empty_flavor_list_v1_0(self):
def _return_empty(self):
return {}
self.stubs.Set(nova.db.api, "instance_type_get_all",
_return_empty)
req = webob.Request.blank('/v1.0/flavors')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavors = json.loads(res.body)["flavors"]
expected = []
self.assertEqual(flavors, expected)
def test_get_flavor_list_detail_v1_0(self):
req = webob.Request.blank('/v1.0/flavors/detail')
res = req.get_response(fakes.wsgi_app())
@@ -146,13 +159,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/flavors/12",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/flavors/12",
"href": "http://localhost/flavors/12",
},
],
}
@@ -175,13 +182,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/flavors/1",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/flavors/1",
"href": "http://localhost/flavors/1",
},
],
},
@@ -195,13 +196,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/flavors/2",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/flavors/2",
"href": "http://localhost/flavors/2",
},
],
},
@@ -227,13 +222,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/flavors/1",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/flavors/1",
"href": "http://localhost/flavors/1",
},
],
},
@@ -249,15 +238,22 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/flavors/2",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/flavors/2",
"href": "http://localhost/flavors/2",
},
],
},
]
self.assertEqual(flavor, expected)
def test_get_empty_flavor_list_v1_1(self):
def _return_empty(self):
return {}
self.stubs.Set(nova.db.api, "instance_type_get_all",
_return_empty)
req = webob.Request.blank('/v1.1/flavors')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
flavors = json.loads(res.body)["flavors"]
expected = []
self.assertEqual(flavors, expected)
+140 -90
View File
@@ -24,6 +24,7 @@ import xml.dom.minidom as minidom
from nova import flags
from nova.api import openstack
from nova import test
from nova.tests.api.openstack import fakes
import nova.wsgi
@@ -31,13 +32,14 @@ import nova.wsgi
FLAGS = flags.FLAGS
class ImageMetaDataTest(unittest.TestCase):
class ImageMetaDataTest(test.TestCase):
IMAGE_FIXTURES = [
{'status': 'active',
'name': 'image1',
'deleted': False,
'container_format': None,
'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -53,6 +55,7 @@ class ImageMetaDataTest(unittest.TestCase):
'name': 'image2',
'deleted': False,
'container_format': None,
'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -68,6 +71,7 @@ class ImageMetaDataTest(unittest.TestCase):
'name': 'image3',
'deleted': False,
'container_format': None,
'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -104,31 +108,10 @@ class ImageMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('value1', res_dict['metadata']['key1'])
def test_index_xml(self):
serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
fixture = {
'metadata': {
'one': 'two',
'three': 'four',
},
}
output = serializer.index(fixture)
actual = minidom.parseString(output.replace(" ", ""))
expected = minidom.parseString("""
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key="three">
four
</meta>
<meta key="one">
two
</meta>
</metadata>
""".replace(" ", ""))
self.assertEqual(expected.toxml(), actual.toxml())
expected = self.IMAGE_FIXTURES[0]['properties']
self.assertEqual(len(expected), len(res_dict['metadata']))
for (key, value) in res_dict['metadata'].items():
self.assertEqual(value, res_dict['metadata'][key])
def test_show(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
@@ -140,24 +123,6 @@ class ImageMetaDataTest(unittest.TestCase):
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
def test_show_xml(self):
serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
fixture = {
'meta': {
'one': 'two',
},
}
output = serializer.show(fixture)
actual = minidom.parseString(output.replace(" ", ""))
expected = minidom.parseString("""
<meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
two
</meta>
""".replace(" ", ""))
self.assertEqual(expected.toxml(), actual.toxml())
def test_show_not_found(self):
req = webob.Request.blank('/v1.1/images/1/meta/key9')
req.environ['api.version'] = '1.1'
@@ -179,34 +144,6 @@ class ImageMetaDataTest(unittest.TestCase):
self.assertEqual('value2', res_dict['metadata']['key2'])
self.assertEqual(1, len(res_dict))
def test_create_xml(self):
serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
fixture = {
'metadata': {
'key9': 'value9',
'key2': 'value2',
'key1': 'value1',
},
}
output = serializer.create(fixture)
actual = minidom.parseString(output.replace(" ", ""))
expected = minidom.parseString("""
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key="key2">
value2
</meta>
<meta key="key9">
value9
</meta>
<meta key="key1">
value1
</meta>
</metadata>
""".replace(" ", ""))
self.assertEqual(expected.toxml(), actual.toxml())
def test_update_item(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
@@ -229,24 +166,6 @@ class ImageMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_update_item_xml(self):
serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
fixture = {
'meta': {
'one': 'two',
},
}
output = serializer.update(fixture)
actual = minidom.parseString(output.replace(" ", ""))
expected = minidom.parseString("""
<meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
two
</meta>
""".replace(" ", ""))
self.assertEqual(expected.toxml(), actual.toxml())
def test_update_item_too_many_keys(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
@@ -300,3 +219,134 @@ class ImageMetaDataTest(unittest.TestCase):
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
class ImageMetadataXMLSerializationTest(test.TestCase):
    """Round-trip checks for ImageMetadataXMLSerializer.

    Each test builds a metadata fixture, serializes it for one action
    ('index', 'show', 'create' or 'update') and compares the resulting
    DOM against a hand-written XML template.  Spaces are stripped from
    both documents before comparison so formatting is irrelevant.
    """

    def _serialize(self, fixture, action):
        # Fresh serializer per call, mirroring how the API layer uses it.
        serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
        return serializer.serialize(fixture, action)

    def _assert_xml_equal(self, output, template):
        # Canonicalize both sides by removing every space, then compare
        # the re-serialized DOM trees.
        actual = minidom.parseString(output.replace(" ", ""))
        expected = minidom.parseString(template.replace(" ", ""))
        self.assertEqual(expected.toxml(), actual.toxml())

    def test_index_xml(self):
        fixture = {
            'metadata': {
                'one': 'two',
                'three': 'four',
            },
        }
        output = self._serialize(fixture, 'index')
        self._assert_xml_equal(output, """
            <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
                <meta key="three">
                    four
                </meta>
                <meta key="one">
                    two
                </meta>
            </metadata>
        """)

    def test_index_xml_null(self):
        # A None key/value pair is rendered via str(), i.e. as "None".
        fixture = {
            'metadata': {
                None: None,
            },
        }
        output = self._serialize(fixture, 'index')
        self._assert_xml_equal(output, """
            <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
                <meta key="None">
                    None
                </meta>
            </metadata>
        """)

    def test_index_xml_unicode(self):
        # Non-ASCII values must survive serialization; the template is
        # UTF-8 encoded before comparison to match the serializer output.
        fixture = {
            'metadata': {
                u'three': u'Jos\xe9',
            },
        }
        output = self._serialize(fixture, 'index')
        self._assert_xml_equal(output, u"""
            <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
                <meta key="three">
                    Jos\xe9
                </meta>
            </metadata>
        """.encode("UTF-8"))

    def test_show_xml(self):
        fixture = {
            'meta': {
                'one': 'two',
            },
        }
        output = self._serialize(fixture, 'show')
        self._assert_xml_equal(output, """
            <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
                two
            </meta>
        """)

    def test_update_item_xml(self):
        fixture = {
            'meta': {
                'one': 'two',
            },
        }
        output = self._serialize(fixture, 'update')
        self._assert_xml_equal(output, """
            <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
                two
            </meta>
        """)

    def test_create_xml(self):
        fixture = {
            'metadata': {
                'key9': 'value9',
                'key2': 'value2',
                'key1': 'value1',
            },
        }
        output = self._serialize(fixture, 'create')
        self._assert_xml_equal(output, """
            <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
                <meta key="key2">
                    value2
                </meta>
                <meta key="key9">
                    value9
                </meta>
                <meta key="key1">
                    value1
                </meta>
            </metadata>
        """)
+558 -162
View File
@@ -340,6 +340,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.fixtures = self._make_image_fixtures()
fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures)
fakes.stub_out_compute_api_snapshot(self.stubs)
fakes.stub_out_compute_api_backup(self.stubs)
def tearDown(self):
"""Run after each test."""
@@ -364,10 +365,10 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response_list = response_dict["images"]
expected = [{'id': 123, 'name': 'public image'},
{'id': 124, 'name': 'queued backup'},
{'id': 125, 'name': 'saving backup'},
{'id': 126, 'name': 'active backup'},
{'id': 127, 'name': 'killed backup'},
{'id': 124, 'name': 'queued snapshot'},
{'id': 125, 'name': 'saving snapshot'},
{'id': 126, 'name': 'active snapshot'},
{'id': 127, 'name': 'killed snapshot'},
{'id': 129, 'name': None}]
self.assertDictListMatch(response_list, expected)
@@ -393,33 +394,33 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.assertEqual(expected_image, actual_image)
def test_get_image_v1_1(self):
request = webob.Request.blank('/v1.1/images/123')
request = webob.Request.blank('/v1.1/images/124')
response = request.get_response(fakes.wsgi_app())
actual_image = json.loads(response.body)
href = "http://localhost/v1.1/images/123"
href = "http://localhost/v1.1/images/124"
bookmark = "http://localhost/images/124"
expected_image = {
"image": {
"id": 123,
"name": "public image",
"id": 124,
"name": "queued snapshot",
"serverRef": "http://localhost/v1.1/servers/42",
"updated": self.NOW_API_FORMAT,
"created": self.NOW_API_FORMAT,
"status": "ACTIVE",
"status": "QUEUED",
"metadata": {
"instance_ref": "http://localhost/v1.1/servers/42",
"user_id": "1",
},
"links": [{
"rel": "self",
"href": href,
},
{
"rel": "bookmark",
"type": "application/json",
"href": href,
},
{
"rel": "bookmark",
"type": "application/xml",
"href": href,
"href": bookmark,
}],
},
}
@@ -464,34 +465,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.assertEqual(expected_image.toxml(), actual_image.toxml())
def test_get_image_v1_1_xml(self):
request = webob.Request.blank('/v1.1/images/123')
request.accept = "application/xml"
response = request.get_response(fakes.wsgi_app())
actual_image = minidom.parseString(response.body.replace(" ", ""))
expected_href = "http://localhost/v1.1/images/123"
expected_now = self.NOW_API_FORMAT
expected_image = minidom.parseString("""
<image id="123"
name="public image"
updated="%(expected_now)s"
created="%(expected_now)s"
status="ACTIVE"
xmlns="http://docs.openstack.org/compute/api/v1.1">
<links>
<link href="%(expected_href)s" rel="self"/>
<link href="%(expected_href)s" rel="bookmark"
type="application/json" />
<link href="%(expected_href)s" rel="bookmark"
type="application/xml" />
</links>
</image>
""".replace(" ", "") % (locals()))
self.assertEqual(expected_image.toxml(), actual_image.toxml())
def test_get_image_404_json(self):
request = webob.Request.blank('/v1.0/images/NonExistantImage')
response = request.get_response(fakes.wsgi_app())
@@ -579,22 +552,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
continue
href = "http://localhost/v1.1/images/%s" % image["id"]
bookmark = "http://localhost/images/%s" % image["id"]
test_image = {
"id": image["id"],
"name": image["name"],
"links": [{
"rel": "self",
"href": "http://localhost/v1.1/images/%s" % image["id"],
},
{
"rel": "bookmark",
"type": "application/json",
"href": href,
},
{
"rel": "bookmark",
"type": "application/xml",
"href": href,
"href": bookmark,
}],
}
self.assertTrue(test_image in response_list)
@@ -617,14 +585,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 124,
'name': 'queued backup',
'name': 'queued snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
},
{
'id': 125,
'name': 'saving backup',
'name': 'saving snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -632,14 +600,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 126,
'name': 'active backup',
'name': 'active snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE'
},
{
'id': 127,
'name': 'killed backup',
'name': 'killed snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -664,6 +632,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
expected = [{
'id': 123,
'name': 'public image',
'metadata': {},
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -673,19 +642,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/images/123",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/images/123",
"href": "http://localhost/images/123",
}],
},
{
'id': 124,
'name': 'queued backup',
'serverRef': "http://localhost:8774/v1.1/servers/42",
'name': 'queued snapshot',
'metadata': {
u'instance_ref': u'http://localhost/v1.1/servers/42',
u'user_id': u'1',
},
'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
@@ -695,19 +662,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/images/124",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/images/124",
"href": "http://localhost/images/124",
}],
},
{
'id': 125,
'name': 'saving backup',
'serverRef': "http://localhost:8774/v1.1/servers/42",
'name': 'saving snapshot',
'metadata': {
u'instance_ref': u'http://localhost/v1.1/servers/42',
u'user_id': u'1',
},
'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -718,19 +683,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/images/125",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/images/125",
"href": "http://localhost/images/125",
}],
},
{
'id': 126,
'name': 'active backup',
'serverRef': "http://localhost:8774/v1.1/servers/42",
'name': 'active snapshot',
'metadata': {
u'instance_ref': u'http://localhost/v1.1/servers/42',
u'user_id': u'1',
},
'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -740,19 +703,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/images/126",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/images/126",
"href": "http://localhost/images/126",
}],
},
{
'id': 127,
'name': 'killed backup',
'serverRef': "http://localhost:8774/v1.1/servers/42",
'name': 'killed snapshot',
'metadata': {
u'instance_ref': u'http://localhost/v1.1/servers/42',
u'user_id': u'1',
},
'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -762,18 +723,13 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/images/127",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/images/127",
"href": "http://localhost/images/127",
}],
},
{
'id': 129,
'name': None,
'metadata': {},
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -783,13 +739,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/images/129",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/images/129",
"href": "http://localhost/images/129",
}],
},
]
@@ -802,7 +752,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'name': 'testname'}
image_service.index(
context, filters=filters, marker=0, limit=0).AndReturn([])
context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?name=testname')
@@ -817,7 +767,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.index(
context, filters=filters, marker=0, limit=0).AndReturn([])
context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?status=ACTIVE')
@@ -832,7 +782,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'property-test': '3'}
image_service.index(
context, filters=filters, marker=0, limit=0).AndReturn([])
context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?property-test=3')
@@ -847,7 +797,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.index(
context, filters=filters, marker=0, limit=0).AndReturn([])
context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -862,7 +812,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {}
image_service.index(
context, filters=filters, marker=0, limit=0).AndReturn([])
context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images')
@@ -877,7 +827,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'name': 'testname'}
image_service.detail(
context, filters=filters, marker=0, limit=0).AndReturn([])
context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?name=testname')
@@ -892,7 +842,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.detail(
context, filters=filters, marker=0, limit=0).AndReturn([])
context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?status=ACTIVE')
@@ -907,7 +857,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'property-test': '3'}
image_service.detail(
context, filters=filters, marker=0, limit=0).AndReturn([])
context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?property-test=3')
@@ -922,7 +872,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.detail(
context, filters=filters, marker=0, limit=0).AndReturn([])
context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -937,7 +887,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {}
image_service.detail(
context, filters=filters, marker=0, limit=0).AndReturn([])
context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail')
@@ -969,8 +919,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.assertEqual(res.status_int, 404)
def test_create_image(self):
body = dict(image=dict(serverId='123', name='Backup 1'))
body = dict(image=dict(serverId='123', name='Snapshot 1'))
req = webob.Request.blank('/v1.0/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -978,9 +927,95 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(200, response.status_int)
def test_create_snapshot_no_name(self):
    """Name is required for snapshots."""
    # serverId present but no 'name': validation must reject the request.
    body = dict(image=dict(serverId='123'))
    req = webob.Request.blank('/v1.0/images')
    req.method = 'POST'
    req.body = json.dumps(body)
    req.headers["content-type"] = "application/json"
    response = req.get_response(fakes.wsgi_app())
    # 400 Bad Request because the body failed validation.
    self.assertEqual(400, response.status_int)
def test_create_backup_no_name(self):
    """Name is also required for backups."""
    # Full backup spec (image_type/backup_type/rotation) but no 'name'.
    body = dict(image=dict(serverId='123', image_type='backup',
                           backup_type='daily', rotation=1))
    req = webob.Request.blank('/v1.0/images')
    req.method = 'POST'
    req.body = json.dumps(body)
    req.headers["content-type"] = "application/json"
    response = req.get_response(fakes.wsgi_app())
    # 400 Bad Request because the body failed validation.
    self.assertEqual(400, response.status_int)
def test_create_backup_with_rotation_and_backup_type(self):
    """The happy path for creating backups

    Creating a backup is an admin-only operation, as opposed to snapshots
    which are available to anybody.
    """
    # FIXME(sirp): teardown needed?
    # NOTE(review): this flag is set but never restored, so it leaks into
    # later tests in the same run — TODO confirm/fix in setUp/tearDown.
    FLAGS.allow_admin_api = True

    # FIXME(sirp): should the fact that backups are admin_only be a FLAG
    body = dict(image=dict(serverId='123', image_type='backup',
                           name='Backup 1',
                           backup_type='daily', rotation=1))
    req = webob.Request.blank('/v1.0/images')
    req.method = 'POST'
    req.body = json.dumps(body)
    req.headers["content-type"] = "application/json"
    response = req.get_response(fakes.wsgi_app())
    # All required backup fields present: request succeeds.
    self.assertEqual(200, response.status_int)
def test_create_backup_no_rotation(self):
    """Rotation is required for backup requests."""
    # FIXME(sirp): teardown needed?
    FLAGS.allow_admin_api = True

    # FIXME(sirp): should the fact that backups are admin_only be a FLAG
    # Backup request missing 'rotation': must be rejected.
    body = dict(image=dict(serverId='123', name='daily',
                           image_type='backup', backup_type='daily'))
    req = webob.Request.blank('/v1.0/images')
    req.method = 'POST'
    req.body = json.dumps(body)
    req.headers["content-type"] = "application/json"
    response = req.get_response(fakes.wsgi_app())
    # 400 Bad Request because the body failed validation.
    self.assertEqual(400, response.status_int)
def test_create_backup_no_backup_type(self):
    """Backup Type (daily or weekly) is required for backup requests."""
    # FIXME(sirp): teardown needed?
    FLAGS.allow_admin_api = True

    # FIXME(sirp): should the fact that backups are admin_only be a FLAG
    # Backup request missing 'backup_type': must be rejected.
    body = dict(image=dict(serverId='123', name='daily',
                           image_type='backup', rotation=1))
    req = webob.Request.blank('/v1.0/images')
    req.method = 'POST'
    req.body = json.dumps(body)
    req.headers["content-type"] = "application/json"
    response = req.get_response(fakes.wsgi_app())
    # 400 Bad Request because the body failed validation.
    self.assertEqual(400, response.status_int)
def test_create_image_with_invalid_image_type(self):
    """Valid image_types are snapshot | daily | weekly."""
    # FIXME(sirp): teardown needed?
    FLAGS.allow_admin_api = True

    # FIXME(sirp): should the fact that backups are admin_only be a FLAG
    # 'monthly' is not an accepted image_type: must be rejected.
    body = dict(image=dict(serverId='123', image_type='monthly',
                           rotation=1))
    req = webob.Request.blank('/v1.0/images')
    req.method = 'POST'
    req.body = json.dumps(body)
    req.headers["content-type"] = "application/json"
    response = req.get_response(fakes.wsgi_app())
    # 400 Bad Request because the body failed validation.
    self.assertEqual(400, response.status_int)
def test_create_image_no_server_id(self):
body = dict(image=dict(name='Backup 1'))
body = dict(image=dict(name='Snapshot 1'))
req = webob.Request.blank('/v1.0/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -990,7 +1025,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
def test_create_image_v1_1(self):
body = dict(image=dict(serverRef='123', name='Backup 1'))
body = dict(image=dict(serverRef='123', name='Snapshot 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -1011,6 +1046,19 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
result = json.loads(response.body)
self.assertEqual(result['image']['serverRef'], serverRef)
def test_create_image_v1_1_actual_server_ref_port(self):
    """A serverRef carrying an explicit port is accepted and echoed back."""
    serverRef = 'http://localhost:8774/v1.1/servers/1'
    body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
    req = webob.Request.blank('/v1.1/images')
    req.method = 'POST'
    req.body = json.dumps(body)
    req.headers["content-type"] = "application/json"
    response = req.get_response(fakes.wsgi_app())
    self.assertEqual(200, response.status_int)
    result = json.loads(response.body)
    # The response must preserve the serverRef exactly, port included.
    self.assertEqual(result['image']['serverRef'], serverRef)
def test_create_image_v1_1_server_ref_bad_hostname(self):
serverRef = 'http://asdf/v1.1/servers/1'
@@ -1022,42 +1070,9 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
def test_create_image_v1_1_xml_serialization(self):
body = dict(image=dict(serverRef='123', name='Backup 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
req.headers["accept"] = "application/xml"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(200, response.status_int)
resp_xml = minidom.parseString(response.body.replace(" ", ""))
expected_href = "http://localhost/v1.1/images/123"
expected_image = minidom.parseString("""
<image
created="None"
id="123"
name="Backup 1"
serverRef="http://localhost/v1.1/servers/123"
status="ACTIVE"
updated="None"
xmlns="http://docs.openstack.org/compute/api/v1.1">
<links>
<link href="%(expected_href)s" rel="self"/>
<link href="%(expected_href)s" rel="bookmark"
type="application/json" />
<link href="%(expected_href)s" rel="bookmark"
type="application/xml" />
</links>
</image>
""".replace(" ", "") % (locals()))
self.assertEqual(expected_image.toxml(), resp_xml.toxml())
def test_create_image_v1_1_no_server_ref(self):
body = dict(image=dict(name='Backup 1'))
body = dict(image=dict(name='Snapshot 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -1084,19 +1099,21 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
status='active', properties={})
image_id += 1
# Backup for User 1
server_ref = 'http://localhost:8774/v1.1/servers/42'
backup_properties = {'instance_ref': server_ref, 'user_id': '1'}
# Snapshot for User 1
server_ref = 'http://localhost/v1.1/servers/42'
snapshot_properties = {'instance_ref': server_ref, 'user_id': '1'}
for status in ('queued', 'saving', 'active', 'killed'):
add_fixture(id=image_id, name='%s backup' % status,
add_fixture(id=image_id, name='%s snapshot' % status,
is_public=False, status=status,
properties=backup_properties)
properties=snapshot_properties)
image_id += 1
# Backup for User 2
other_backup_properties = {'instance_id': '43', 'user_id': '2'}
add_fixture(id=image_id, name='someone elses backup', is_public=False,
status='active', properties=other_backup_properties)
# Snapshot for User 2
other_snapshot_properties = {'instance_id': '43', 'user_id': '2'}
add_fixture(id=image_id, name='someone elses snapshot',
is_public=False, status='active',
properties=other_snapshot_properties)
image_id += 1
# Image without a name
@@ -1105,3 +1122,382 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_id += 1
return fixtures
class ImageXMLSerializationTest(test.TestCase):
    """DOM-level checks for ImageXMLSerializer.

    Each test serializes an image fixture for one action and compares the
    parsed result against an expected XML template.  The templates use
    ``%(name)s`` placeholders filled from ``locals()``, so the local
    variable names in each test are part of the template contract.
    Spaces are stripped from both sides before comparison.
    """

    # Fixed timestamp used for both 'created' and 'updated'.
    TIMESTAMP = "2010-10-11T10:30:22Z"
    # Canonical server / image hrefs used by the fixtures.
    SERVER_HREF = 'http://localhost/v1.1/servers/123'
    IMAGE_HREF = 'http://localhost/v1.1/images/%s'

    def test_show(self):
        """'show' emits a single <image> with links and metadata."""
        serializer = images.ImageXMLSerializer()

        fixture = {
            'image': {
                'id': 1,
                'name': 'Image1',
                'created': self.TIMESTAMP,
                'updated': self.TIMESTAMP,
                'serverRef': self.SERVER_HREF,
                'status': 'ACTIVE',
                'metadata': {
                    'key1': 'value1',
                },
                'links': [
                    {
                        'href': self.IMAGE_HREF % (1,),
                        'rel': 'bookmark',
                        'type': 'application/json',
                    },
                ],
            },
        }

        output = serializer.serialize(fixture, 'show')
        actual = minidom.parseString(output.replace(" ", ""))

        # These locals feed the %(...)s placeholders via locals() below.
        expected_server_href = self.SERVER_HREF
        expected_href = self.IMAGE_HREF % (1, )
        expected_now = self.TIMESTAMP
        expected = minidom.parseString("""
        <image id="1"
                name="Image1"
                serverRef="%(expected_server_href)s"
                updated="%(expected_now)s"
                created="%(expected_now)s"
                status="ACTIVE"
                xmlns="http://docs.openstack.org/compute/api/v1.1">
            <links>
                <link href="%(expected_href)s" rel="bookmark"
                    type="application/json" />
            </links>
            <metadata>
                <meta key="key1">
                    value1
                </meta>
            </metadata>
        </image>
        """.replace(" ", "") % (locals()))

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_show_zero_metadata(self):
        """An empty metadata dict still yields an (empty) <metadata/>."""
        serializer = images.ImageXMLSerializer()

        fixture = {
            'image': {
                'id': 1,
                'name': 'Image1',
                'created': self.TIMESTAMP,
                'updated': self.TIMESTAMP,
                'serverRef': self.SERVER_HREF,
                'status': 'ACTIVE',
                'metadata': {},
                'links': [
                    {
                        'href': self.IMAGE_HREF % (1,),
                        'rel': 'bookmark',
                        'type': 'application/json',
                    },
                ],
            },
        }

        output = serializer.serialize(fixture, 'show')
        actual = minidom.parseString(output.replace(" ", ""))

        expected_server_href = self.SERVER_HREF
        expected_href = self.IMAGE_HREF % (1, )
        expected_now = self.TIMESTAMP
        expected = minidom.parseString("""
        <image id="1"
                name="Image1"
                serverRef="%(expected_server_href)s"
                updated="%(expected_now)s"
                created="%(expected_now)s"
                status="ACTIVE"
                xmlns="http://docs.openstack.org/compute/api/v1.1">
            <links>
                <link href="%(expected_href)s" rel="bookmark"
                    type="application/json" />
            </links>
            <metadata />
        </image>
        """.replace(" ", "") % (locals()))

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_show_image_no_metadata_key(self):
        """A fixture without a 'metadata' key behaves like empty metadata."""
        serializer = images.ImageXMLSerializer()

        fixture = {
            'image': {
                'id': 1,
                'name': 'Image1',
                'created': self.TIMESTAMP,
                'updated': self.TIMESTAMP,
                'serverRef': self.SERVER_HREF,
                'status': 'ACTIVE',
                'links': [
                    {
                        'href': self.IMAGE_HREF % (1,),
                        'rel': 'bookmark',
                        'type': 'application/json',
                    },
                ],
            },
        }

        output = serializer.serialize(fixture, 'show')
        actual = minidom.parseString(output.replace(" ", ""))

        expected_server_href = self.SERVER_HREF
        expected_href = self.IMAGE_HREF % (1, )
        expected_now = self.TIMESTAMP
        expected = minidom.parseString("""
        <image id="1"
                name="Image1"
                serverRef="%(expected_server_href)s"
                updated="%(expected_now)s"
                created="%(expected_now)s"
                status="ACTIVE"
                xmlns="http://docs.openstack.org/compute/api/v1.1">
            <links>
                <link href="%(expected_href)s" rel="bookmark"
                    type="application/json" />
            </links>
            <metadata />
        </image>
        """.replace(" ", "") % (locals()))

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_index(self):
        """'index' emits an <images> list without metadata sections."""
        serializer = images.ImageXMLSerializer()

        fixtures = {
            'images': [
                {
                    'id': 1,
                    'name': 'Image1',
                    'created': self.TIMESTAMP,
                    'updated': self.TIMESTAMP,
                    'serverRef': self.SERVER_HREF,
                    'status': 'ACTIVE',
                    'links': [
                        {
                            'href': 'http://localhost/v1.1/images/1',
                            'rel': 'bookmark',
                            'type': 'application/json',
                        },
                    ],
                },
                {
                    'id': 2,
                    'name': 'queued image',
                    'created': self.TIMESTAMP,
                    'updated': self.TIMESTAMP,
                    'serverRef': self.SERVER_HREF,
                    'status': 'QUEUED',
                    'links': [
                        {
                            'href': 'http://localhost/v1.1/images/2',
                            'rel': 'bookmark',
                            'type': 'application/json',
                        },
                    ],
                },
            ],
        }

        output = serializer.serialize(fixtures, 'index')
        actual = minidom.parseString(output.replace(" ", ""))

        expected_serverRef = self.SERVER_HREF
        expected_now = self.TIMESTAMP
        expected = minidom.parseString("""
        <images xmlns="http://docs.openstack.org/compute/api/v1.1">
            <image id="1"
                    name="Image1"
                    serverRef="%(expected_serverRef)s"
                    updated="%(expected_now)s"
                    created="%(expected_now)s"
                    status="ACTIVE">
                <links>
                    <link href="http://localhost/v1.1/images/1" rel="bookmark"
                        type="application/json" />
                </links>
            </image>
            <image id="2"
                    name="queued image"
                    serverRef="%(expected_serverRef)s"
                    updated="%(expected_now)s"
                    created="%(expected_now)s"
                    status="QUEUED">
                <links>
                    <link href="http://localhost/v1.1/images/2" rel="bookmark"
                        type="application/json" />
                </links>
            </image>
        </images>
        """.replace(" ", "") % (locals()))

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_index_zero_images(self):
        """An empty image list serializes to a self-closed <images/>."""
        serializer = images.ImageXMLSerializer()

        fixtures = {
            'images': [],
        }

        output = serializer.serialize(fixtures, 'index')
        actual = minidom.parseString(output.replace(" ", ""))

        expected_serverRef = self.SERVER_HREF
        expected_now = self.TIMESTAMP
        expected = minidom.parseString("""
        <images xmlns="http://docs.openstack.org/compute/api/v1.1" />
        """.replace(" ", "") % (locals()))

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_detail(self):
        """'detail' emits <images> including per-image metadata."""
        serializer = images.ImageXMLSerializer()

        fixtures = {
            'images': [
                {
                    'id': 1,
                    'name': 'Image1',
                    'created': self.TIMESTAMP,
                    'updated': self.TIMESTAMP,
                    'serverRef': self.SERVER_HREF,
                    'status': 'ACTIVE',
                    'metadata': {
                        'key1': 'value1',
                        'key2': 'value2',
                    },
                    'links': [
                        {
                            'href': 'http://localhost/v1.1/images/1',
                            'rel': 'bookmark',
                            'type': 'application/json',
                        },
                    ],
                },
                {
                    'id': 2,
                    'name': 'queued image',
                    'created': self.TIMESTAMP,
                    'updated': self.TIMESTAMP,
                    'serverRef': self.SERVER_HREF,
                    'metadata': {},
                    'status': 'QUEUED',
                    'links': [
                        {
                            'href': 'http://localhost/v1.1/images/2',
                            'rel': 'bookmark',
                            'type': 'application/json',
                        },
                    ],
                },
            ],
        }

        output = serializer.serialize(fixtures, 'detail')
        actual = minidom.parseString(output.replace(" ", ""))

        expected_serverRef = self.SERVER_HREF
        expected_now = self.TIMESTAMP
        expected = minidom.parseString("""
        <images xmlns="http://docs.openstack.org/compute/api/v1.1">
            <image id="1"
                    name="Image1"
                    serverRef="%(expected_serverRef)s"
                    updated="%(expected_now)s"
                    created="%(expected_now)s"
                    status="ACTIVE">
                <links>
                    <link href="http://localhost/v1.1/images/1" rel="bookmark"
                        type="application/json" />
                </links>
                <metadata>
                    <meta key="key2">
                        value2
                    </meta>
                    <meta key="key1">
                        value1
                    </meta>
                </metadata>
            </image>
            <image id="2"
                    name="queued image"
                    serverRef="%(expected_serverRef)s"
                    updated="%(expected_now)s"
                    created="%(expected_now)s"
                    status="QUEUED">
                <links>
                    <link href="http://localhost/v1.1/images/2" rel="bookmark"
                        type="application/json" />
                </links>
                <metadata />
            </image>
        </images>
        """.replace(" ", "") % (locals()))

        self.assertEqual(expected.toxml(), actual.toxml())

    def test_create(self):
        """'create' output matches 'show' for the same fixture."""
        serializer = images.ImageXMLSerializer()

        fixture = {
            'image': {
                'id': 1,
                'name': 'Image1',
                'created': self.TIMESTAMP,
                'updated': self.TIMESTAMP,
                'serverRef': self.SERVER_HREF,
                'status': 'ACTIVE',
                'metadata': {
                    'key1': 'value1',
                },
                'links': [
                    {
                        'href': self.IMAGE_HREF % (1,),
                        'rel': 'bookmark',
                        'type': 'application/json',
                    },
                ],
            },
        }

        output = serializer.serialize(fixture, 'create')
        actual = minidom.parseString(output.replace(" ", ""))

        expected_server_href = self.SERVER_HREF
        expected_href = self.IMAGE_HREF % (1, )
        expected_now = self.TIMESTAMP
        expected = minidom.parseString("""
        <image id="1"
                name="Image1"
                serverRef="%(expected_server_href)s"
                updated="%(expected_now)s"
                created="%(expected_now)s"
                status="ACTIVE"
                xmlns="http://docs.openstack.org/compute/api/v1.1">
            <links>
                <link href="%(expected_href)s" rel="bookmark"
                    type="application/json" />
            </links>
            <metadata>
                <meta key="key1">
                    value1
                </meta>
            </metadata>
        </image>
        """.replace(" ", "") % (locals()))

        self.assertEqual(expected.toxml(), actual.toxml())
+57 -50
View File
@@ -118,7 +118,7 @@ def instance_update(context, instance_id, kwargs):
return stub_instance(instance_id)
def instance_address(context, instance_id):
def instance_addresses(context, instance_id):
return None
@@ -173,7 +173,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
"metadata": metadata,
"uuid": uuid}
instance["fixed_ip"] = {
instance["fixed_ips"] = {
"address": private_address,
"floating_ips": [{"address":ip} for ip in public_addresses]}
@@ -220,10 +220,10 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.db.api, 'instance_add_security_group',
return_security_group)
self.stubs.Set(nova.db.api, 'instance_update', instance_update)
self.stubs.Set(nova.db.api, 'instance_get_fixed_address',
instance_address)
self.stubs.Set(nova.db.api, 'instance_get_fixed_addresses',
instance_addresses)
self.stubs.Set(nova.db.api, 'instance_get_floating_address',
instance_address)
instance_addresses)
self.stubs.Set(nova.compute.API, 'pause', fake_compute_api)
self.stubs.Set(nova.compute.API, 'unpause', fake_compute_api)
self.stubs.Set(nova.compute.API, 'suspend', fake_compute_api)
@@ -290,13 +290,7 @@ class ServersTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/servers/1",
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/servers/1",
"href": "http://localhost/servers/1",
},
]
@@ -427,12 +421,13 @@ class ServersTest(test.TestCase):
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
addresses = res_dict['server']['addresses']
self.assertEqual(len(addresses["public"]), len(public))
self.assertEqual(addresses["public"][0],
{"version": 4, "addr": public[0]})
self.assertEqual(len(addresses["private"]), 1)
self.assertEqual(addresses["private"][0],
{"version": 4, "addr": private})
# RM(4047): Figure otu what is up with the 1.1 api and multi-nic
#self.assertEqual(len(addresses["public"]), len(public))
#self.assertEqual(addresses["public"][0],
# {"version": 4, "addr": public[0]})
#self.assertEqual(len(addresses["private"]), 1)
#self.assertEqual(addresses["private"][0],
# {"version": 4, "addr": private})
def test_get_server_list(self):
req = webob.Request.blank('/v1.0/servers')
@@ -514,13 +509,7 @@ class ServersTest(test.TestCase):
},
{
"rel": "bookmark",
"type": "application/json",
"href": "http://localhost/v1.1/servers/%d" % (i,),
},
{
"rel": "bookmark",
"type": "application/xml",
"href": "http://localhost/v1.1/servers/%d" % (i,),
"href": "http://localhost/servers/%d" % (i,),
},
]
@@ -596,7 +585,7 @@ class ServersTest(test.TestCase):
def fake_method(*args, **kwargs):
pass
def project_get_network(context, user_id):
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
def queue_get_for(context, *args):
@@ -608,7 +597,8 @@ class ServersTest(test.TestCase):
def image_id_from_hash(*args, **kwargs):
return 2
self.stubs.Set(nova.db.api, 'project_get_network', project_get_network)
self.stubs.Set(nova.db.api, 'project_get_networks',
project_get_networks)
self.stubs.Set(nova.db.api, 'instance_create', instance_create)
self.stubs.Set(nova.rpc, 'cast', fake_method)
self.stubs.Set(nova.rpc, 'call', fake_method)
@@ -915,7 +905,7 @@ class ServersTest(test.TestCase):
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 422)
self.assertEqual(res.status_int, 400)
def test_update_nonstring_name(self):
""" Confirm that update is filtering params """
@@ -1567,6 +1557,23 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
    def test_migrate_server(self):
        """This is basically the same as resize, only we provide the `migrate`
        attribute in the body's dict.
        """
        req = self.webreq('/1/action', 'POST', dict(migrate=None))

        # Record whether the compute API's resize entry point was hit;
        # the migrate action is expected to be routed through resize.
        self.resize_called = False

        def resize_mock(*args):
            self.resize_called = True

        self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)

        res = req.get_response(fakes.wsgi_app())
        # 202 Accepted: the action is asynchronous.
        self.assertEqual(res.status_int, 202)
        self.assertEqual(self.resize_called, True)
def test_shutdown_status(self):
new_server = return_server_with_power_state(power_state.SHUTDOWN)
self.stubs.Set(nova.db.api, 'instance_get', new_server)
@@ -1601,7 +1608,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
"imageId": "1",
"flavorId": "1",
}}
self.assertEquals(request, expected)
self.assertEquals(request['body'], expected)
def test_request_with_empty_metadata(self):
serial_request = """
@@ -1616,7 +1623,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
"flavorId": "1",
"metadata": {},
}}
self.assertEquals(request, expected)
self.assertEquals(request['body'], expected)
def test_request_with_empty_personality(self):
serial_request = """
@@ -1631,7 +1638,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
"flavorId": "1",
"personality": [],
}}
self.assertEquals(request, expected)
self.assertEquals(request['body'], expected)
def test_request_with_empty_metadata_and_personality(self):
serial_request = """
@@ -1648,7 +1655,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
"metadata": {},
"personality": [],
}}
self.assertEquals(request, expected)
self.assertEquals(request['body'], expected)
def test_request_with_empty_metadata_and_personality_reversed(self):
serial_request = """
@@ -1665,7 +1672,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
"metadata": {},
"personality": [],
}}
self.assertEquals(request, expected)
self.assertEquals(request['body'], expected)
def test_request_with_one_personality(self):
serial_request = """
@@ -1677,7 +1684,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
</server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
self.assertEquals(request["server"]["personality"], expected)
self.assertEquals(request['body']["server"]["personality"], expected)
def test_request_with_two_personalities(self):
serial_request = """
@@ -1688,7 +1695,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": "aabbccdd"},
{"path": "/etc/sudoers", "contents": "abcd"}]
self.assertEquals(request["server"]["personality"], expected)
self.assertEquals(request['body']["server"]["personality"], expected)
def test_request_second_personality_node_ignored(self):
serial_request = """
@@ -1703,7 +1710,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
</server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
self.assertEquals(request["server"]["personality"], expected)
self.assertEquals(request['body']["server"]["personality"], expected)
def test_request_with_one_personality_missing_path(self):
serial_request = """
@@ -1712,7 +1719,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<personality><file>aabbccdd</file></personality></server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"contents": "aabbccdd"}]
self.assertEquals(request["server"]["personality"], expected)
self.assertEquals(request['body']["server"]["personality"], expected)
def test_request_with_one_personality_empty_contents(self):
serial_request = """
@@ -1721,7 +1728,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<personality><file path="/etc/conf"></file></personality></server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": ""}]
self.assertEquals(request["server"]["personality"], expected)
self.assertEquals(request['body']["server"]["personality"], expected)
def test_request_with_one_personality_empty_contents_variation(self):
serial_request = """
@@ -1730,7 +1737,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<personality><file path="/etc/conf"/></personality></server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": ""}]
self.assertEquals(request["server"]["personality"], expected)
self.assertEquals(request['body']["server"]["personality"], expected)
def test_request_with_one_metadata(self):
serial_request = """
@@ -1742,7 +1749,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
</server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": "beta"}
self.assertEquals(request["server"]["metadata"], expected)
self.assertEquals(request['body']["server"]["metadata"], expected)
def test_request_with_two_metadata(self):
serial_request = """
@@ -1755,7 +1762,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
</server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": "beta", "foo": "bar"}
self.assertEquals(request["server"]["metadata"], expected)
self.assertEquals(request['body']["server"]["metadata"], expected)
def test_request_with_metadata_missing_value(self):
serial_request = """
@@ -1767,7 +1774,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
</server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": ""}
self.assertEquals(request["server"]["metadata"], expected)
self.assertEquals(request['body']["server"]["metadata"], expected)
def test_request_with_two_metadata_missing_value(self):
serial_request = """
@@ -1780,7 +1787,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
</server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": "", "delta": ""}
self.assertEquals(request["server"]["metadata"], expected)
self.assertEquals(request['body']["server"]["metadata"], expected)
def test_request_with_metadata_missing_key(self):
serial_request = """
@@ -1792,7 +1799,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
</server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = {"": "beta"}
self.assertEquals(request["server"]["metadata"], expected)
self.assertEquals(request['body']["server"]["metadata"], expected)
def test_request_with_two_metadata_missing_key(self):
serial_request = """
@@ -1805,7 +1812,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
</server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = {"": "gamma"}
self.assertEquals(request["server"]["metadata"], expected)
self.assertEquals(request['body']["server"]["metadata"], expected)
def test_request_with_metadata_duplicate_key(self):
serial_request = """
@@ -1818,7 +1825,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
</server>"""
request = self.deserializer.deserialize(serial_request, 'create')
expected = {"foo": "baz"}
self.assertEquals(request["server"]["metadata"], expected)
self.assertEquals(request['body']["server"]["metadata"], expected)
def test_canonical_request_from_docs(self):
serial_request = """
@@ -1864,7 +1871,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
],
}}
request = self.deserializer.deserialize(serial_request, 'create')
self.assertEqual(request, expected)
self.assertEqual(request['body'], expected)
def test_request_xmlser_with_flavor_image_href(self):
serial_request = """
@@ -1874,9 +1881,9 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
flavorRef="http://localhost:8774/v1.1/flavors/1">
</server>"""
request = self.deserializer.deserialize(serial_request, 'create')
self.assertEquals(request["server"]["flavorRef"],
self.assertEquals(request['body']["server"]["flavorRef"],
"http://localhost:8774/v1.1/flavors/1")
self.assertEquals(request["server"]["imageRef"],
self.assertEquals(request['body']["server"]["imageRef"],
"http://localhost:8774/v1.1/images/1")
@@ -1941,7 +1948,7 @@ class TestServerInstanceCreation(test.TestCase):
def _get_create_request_json(self, body_dict):
req = webob.Request.blank('/v1.0/servers')
req.content_type = 'application/json'
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body_dict)
return req
+94 -53
View File
@@ -12,8 +12,7 @@ class RequestTest(test.TestCase):
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertRaises(exception.InvalidContentType,
request.get_content_type)
self.assertEqual(None, request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
@@ -76,24 +75,48 @@ class RequestTest(test.TestCase):
self.assertEqual(result, "application/json")
class DictSerializerTest(test.TestCase):
class ActionDispatcherTest(test.TestCase):
def test_dispatch(self):
serializer = wsgi.DictSerializer()
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.serialize({}, 'create'), 'pants')
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.serialize({}, 'update'), 'trousers')
self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
def test_dispatch_action_None(self):
serializer = wsgi.DictSerializer()
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.serialize({}, None), 'trousers')
self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
class ResponseHeadersSerializerTest(test.TestCase):
def test_default(self):
serializer = wsgi.ResponseHeadersSerializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'asdf')
self.assertEqual(response.status_int, 200)
def test_custom(self):
class Serializer(wsgi.ResponseHeadersSerializer):
def update(self, response, data):
response.status_int = 404
response.headers['X-Custom-Header'] = data['v']
serializer = Serializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'update')
self.assertEqual(response.status_int, 404)
self.assertEqual(response.headers['X-Custom-Header'], '123')
class DictSerializerTest(test.TestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual(serializer.serialize({}, 'update'), '')
class XMLDictSerializerTest(test.TestCase):
@@ -117,23 +140,9 @@ class JSONDictSerializerTest(test.TestCase):
class TextDeserializerTest(test.TestCase):
def test_dispatch(self):
deserializer = wsgi.TextDeserializer()
deserializer.create = lambda x: 'pants'
deserializer.default = lambda x: 'trousers'
self.assertEqual(deserializer.deserialize({}, 'create'), 'pants')
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
deserializer.create = lambda x: 'pants'
deserializer.default = lambda x: 'trousers'
self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers')
def test_dispatch_action_None(self):
deserializer = wsgi.TextDeserializer()
deserializer.create = lambda x: 'pants'
deserializer.default = lambda x: 'trousers'
self.assertEqual(deserializer.deserialize({}, None), 'trousers')
self.assertEqual(deserializer.deserialize({}, 'update'), {})
class JSONDeserializerTest(test.TestCase):
@@ -144,12 +153,17 @@ class JSONDeserializerTest(test.TestCase):
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = dict(a={
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': dict(c1='1')}],
'd': {'e': '1'},
'f': '1'})
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1',
},
},
}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(deserializer.deserialize(data), as_dict)
@@ -163,23 +177,44 @@ class XMLDeserializerTest(test.TestCase):
<f>1</f>
</a>
""".strip()
as_dict = dict(a={
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': dict(c1='1')}],
'd': {'e': '1'},
'f': '1'})
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1',
},
},
}
metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
deserializer = wsgi.XMLDeserializer(metadata=metadata)
self.assertEqual(deserializer.deserialize(xml), as_dict)
def test_xml_empty(self):
xml = """<a></a>"""
as_dict = {"a": {}}
as_dict = {"body": {"a": {}}}
deserializer = wsgi.XMLDeserializer()
self.assertEqual(deserializer.deserialize(xml), as_dict)
class RequestHeadersDeserializerTest(test.TestCase):
def test_default(self):
deserializer = wsgi.RequestHeadersDeserializer()
req = wsgi.Request.blank('/')
self.assertEqual(deserializer.deserialize(req, 'asdf'), {})
def test_custom(self):
class Deserializer(wsgi.RequestHeadersDeserializer):
def update(self, request):
return {'a': request.headers['X-Custom-Header']}
deserializer = Deserializer()
req = wsgi.Request.blank('/')
req.headers['X-Custom-Header'] = 'b'
self.assertEqual(deserializer.deserialize(req, 'update'), {'a': 'b'})
class ResponseSerializerTest(test.TestCase):
def setUp(self):
class JSONSerializer(object):
@@ -190,29 +225,36 @@ class ResponseSerializerTest(test.TestCase):
def serialize(self, data, action='default'):
return 'pew_xml'
self.serializers = {
class HeadersSerializer(object):
def serialize(self, response, data, action):
response.status_int = 404
self.body_serializers = {
'application/json': JSONSerializer(),
'application/XML': XMLSerializer(),
}
self.serializer = wsgi.ResponseSerializer(serializers=self.serializers)
self.serializer = wsgi.ResponseSerializer(self.body_serializers,
HeadersSerializer())
def tearDown(self):
pass
def test_get_serializer(self):
self.assertEqual(self.serializer.get_serializer('application/json'),
self.serializers['application/json'])
ctype = 'application/json'
self.assertEqual(self.serializer.get_body_serializer(ctype),
self.body_serializers[ctype])
def test_get_serializer_unknown_content_type(self):
self.assertRaises(exception.InvalidContentType,
self.serializer.get_serializer,
self.serializer.get_body_serializer,
'application/unknown')
def test_serialize_response(self):
response = self.serializer.serialize({}, 'application/json')
self.assertEqual(response.headers['Content-Type'], 'application/json')
self.assertEqual(response.body, 'pew_json')
self.assertEqual(response.status_int, 404)
def test_serialize_response_dict_to_unknown_content_type(self):
self.assertRaises(exception.InvalidContentType,
@@ -230,24 +272,23 @@ class RequestDeserializerTest(test.TestCase):
def deserialize(self, data, action='default'):
return 'pew_xml'
self.deserializers = {
self.body_deserializers = {
'application/json': JSONDeserializer(),
'application/XML': XMLDeserializer(),
}
self.deserializer = wsgi.RequestDeserializer(
deserializers=self.deserializers)
self.deserializer = wsgi.RequestDeserializer(self.body_deserializers)
def tearDown(self):
pass
def test_get_deserializer(self):
expected = self.deserializer.get_deserializer('application/json')
self.assertEqual(expected, self.deserializers['application/json'])
expected = self.deserializer.get_body_deserializer('application/json')
self.assertEqual(expected, self.body_deserializers['application/json'])
def test_get_deserializer_unknown_content_type(self):
self.assertRaises(exception.InvalidContentType,
self.deserializer.get_deserializer,
self.deserializer.get_body_deserializer,
'application/unknown')
def test_get_expected_content_type(self):
+5 -5
View File
@@ -34,7 +34,7 @@ FLAGS.verbose = True
def zone_get(context, zone_id):
return dict(id=1, api_url='http://example.com', username='bob',
password='xxx')
password='xxx', weight_scale=1.0, weight_offset=0.0)
def zone_create(context, values):
@@ -57,9 +57,9 @@ def zone_delete(context, zone_id):
def zone_get_all_scheduler(*args):
return [
dict(id=1, api_url='http://example.com', username='bob',
password='xxx'),
password='xxx', weight_scale=1.0, weight_offset=0.0),
dict(id=2, api_url='http://example.org', username='alice',
password='qwerty'),
password='qwerty', weight_scale=1.0, weight_offset=0.0),
]
@@ -70,9 +70,9 @@ def zone_get_all_scheduler_empty(*args):
def zone_get_all_db(context):
return [
dict(id=1, api_url='http://example.com', username='bob',
password='xxx'),
password='xxx', weight_scale=1.0, weight_offset=0.0),
dict(id=2, api_url='http://example.org', username='alice',
password='qwerty'),
password='qwerty', weight_scale=1.0, weight_offset=0.0),
]
+332 -33
View File
@@ -20,10 +20,327 @@
import time
from nova import db
from nova import exception
from nova import test
from nova import utils
class FakeModel(object):
    """Lightweight stand-in for a database model row.

    Wraps a plain dict of column values and exposes them through both
    attribute access and item access, mimicking how real model objects
    behave in the tests.
    """

    def __init__(self, values):
        self.values = values

    def __getattr__(self, name):
        # Only invoked for names not found through normal lookup, so
        # regular attributes such as ``values`` are unaffected.
        return self.values[name]

    def __getitem__(self, key):
        # Unknown columns are reported as unimplemented rather than
        # raising KeyError, matching the original stub's contract.
        if key not in self.values:
            raise NotImplementedError()
        return self.values[key]

    def __repr__(self):
        return '<FakeModel: %s>' % self.values
def stub_out(stubs, funcs):
    """Install each fake in *funcs* over the matching ``db`` API call.

    Every fake must be named ``fake_<db_func_name>``; the leading
    ``fake`` token is stripped to determine which attribute on ``db``
    to replace via ``stubs.Set``.
    """
    for fake in funcs:
        _, _, target_name = fake.__name__.partition('_')
        stubs.Set(db, target_name, fake)
def stub_out_db_network_api(stubs):
    """Stub out the network-related nova.db API with in-memory fakes.

    Builds a tiny fake "database" out of function-local lists
    (networks, fixed IPs, floating IPs, virtual interfaces) and stubs
    each ``db.<name>`` call used by the network code to operate on that
    data instead of a real database.

    :param stubs: a stubout ``StubOutForTesting``-style object whose
                  ``Set`` method is used (via ``stub_out``) to install
                  each fake.
    """
    # Template rows.  Each "table" below is seeded with exactly one row,
    # and the template dict itself IS that stored row, so mutations made
    # by the fakes persist for the lifetime of the stubbed environment.
    network_fields = {'id': 0,
                      'cidr': '192.168.0.0/24',
                      'netmask': '255.255.255.0',
                      'cidr_v6': 'dead:beef::/64',
                      'netmask_v6': '64',
                      'project_id': 'fake',
                      'label': 'fake',
                      'gateway': '192.168.0.1',
                      'bridge': 'fa0',
                      'bridge_interface': 'fake_fa0',
                      'broadcast': '192.168.0.255',
                      'gateway_v6': 'dead:beef::1',
                      'dns': '192.168.0.1',
                      'vlan': None,
                      'host': None,
                      'injected': False,
                      'vpn_public_address': '192.168.0.2'}

    fixed_ip_fields = {'id': 0,
                       'network_id': 0,
                       'network': FakeModel(network_fields),
                       'address': '192.168.0.100',
                       'instance': False,
                       'instance_id': 0,
                       'allocated': False,
                       'virtual_interface_id': 0,
                       'virtual_interface': None,
                       'floating_ips': []}

    flavor_fields = {'id': 0,
                     'rxtx_cap': 3}

    floating_ip_fields = {'id': 0,
                          'address': '192.168.1.100',
                          'fixed_ip_id': None,
                          'fixed_ip': None,
                          'project_id': None,
                          'auto_assigned': False}

    virtual_interface_fields = {'id': 0,
                                'address': 'DE:AD:BE:EF:00:00',
                                'network_id': 0,
                                'instance_id': 0,
                                'network': FakeModel(network_fields)}

    # In-memory "tables".  NOTE: the misspelling 'virtual_interfacees'
    # is kept as-is; it is purely internal to this function.
    fixed_ips = [fixed_ip_fields]
    floating_ips = [floating_ip_fields]
    virtual_interfacees = [virtual_interface_fields]
    networks = [network_fields]

    def fake_floating_ip_allocate_address(context, project_id):
        # Claim the first floating IP that is both unassociated and
        # unowned; raise when the pool is exhausted.
        ips = filter(lambda i: i['fixed_ip_id'] == None \
                               and i['project_id'] == None,
                     floating_ips)
        if not ips:
            raise exception.NoMoreFloatingIps()
        ips[0]['project_id'] = project_id
        return FakeModel(ips[0])

    def fake_floating_ip_deallocate(context, address):
        # Release project ownership and clear the auto_assigned flag.
        ips = filter(lambda i: i['address'] == address,
                     floating_ips)
        if ips:
            ips[0]['project_id'] = None
            ips[0]['auto_assigned'] = False

    def fake_floating_ip_disassociate(context, address):
        # Detach the floating IP from its fixed IP and return the fixed
        # address it was attached to (None if there was none).
        ips = filter(lambda i: i['address'] == address,
                     floating_ips)
        if ips:
            fixed_ip_address = None
            if ips[0]['fixed_ip']:
                fixed_ip_address = ips[0]['fixed_ip']['address']
            ips[0]['fixed_ip'] = None
            return fixed_ip_address

    def fake_floating_ip_fixed_ip_associate(context, floating_address,
                                            fixed_address):
        # Link a floating IP row to a fixed IP row, by address.
        float = filter(lambda i: i['address'] == floating_address,
                       floating_ips)
        fixed = filter(lambda i: i['address'] == fixed_address,
                       fixed_ips)
        if float and fixed:
            float[0]['fixed_ip'] = fixed[0]
            float[0]['fixed_ip_id'] = fixed[0]['id']

    def fake_floating_ip_get_all_by_host(context, host):
        # TODO(jkoelker): Once we get the patches that remove host from
        #                 the floating_ip table, we'll need to stub
        #                 this out
        pass

    def fake_floating_ip_get_by_address(context, address):
        if isinstance(address, FakeModel):
            # NOTE(tr3buchet): yo dawg, i heard you like addresses
            address = address['address']
        ips = filter(lambda i: i['address'] == address,
                     floating_ips)
        if not ips:
            raise exception.FloatingIpNotFoundForAddress(address=address)
        return FakeModel(ips[0])

    def fake_floating_ip_set_auto_assigned(contex, address):
        # NOTE(review): 'contex' (sic) is the original parameter name.
        ips = filter(lambda i: i['address'] == address,
                     floating_ips)
        if ips:
            ips[0]['auto_assigned'] = True

    def fake_fixed_ip_associate(context, address, instance_id):
        # Mark a specific fixed IP as owned by the given instance.
        ips = filter(lambda i: i['address'] == address,
                     fixed_ips)
        if not ips:
            raise exception.NoMoreFixedIps()
        ips[0]['instance'] = True
        ips[0]['instance_id'] = instance_id

    def fake_fixed_ip_associate_pool(context, network_id, instance_id):
        # Grab the first free fixed IP in the network (or an unscoped
        # one) and assign it to the instance; return its address.
        ips = filter(lambda i: (i['network_id'] == network_id \
                                or i['network_id'] is None) \
                               and not i['instance'],
                     fixed_ips)
        if not ips:
            raise exception.NoMoreFixedIps()
        ips[0]['instance'] = True
        ips[0]['instance_id'] = instance_id
        return ips[0]['address']

    def fake_fixed_ip_create(context, values):
        # New row = template copy + next free id + caller's overrides.
        ip = dict(fixed_ip_fields)
        ip['id'] = max([i['id'] for i in fixed_ips] or [-1]) + 1
        for key in values:
            ip[key] = values[key]
        return ip['address']

    def fake_fixed_ip_disassociate(context, address):
        # Clear instance and virtual-interface linkage for the address.
        ips = filter(lambda i: i['address'] == address,
                     fixed_ips)
        if ips:
            ips[0]['instance_id'] = None
            ips[0]['instance'] = None
            ips[0]['virtual_interface'] = None
            ips[0]['virtual_interface_id'] = None

    def fake_fixed_ip_disassociate_all_by_timeout(context, host, time):
        # Nothing ever times out in the fake database.
        return 0

    def fake_fixed_ip_get_by_instance(context, instance_id):
        ips = filter(lambda i: i['instance_id'] == instance_id,
                     fixed_ips)
        return [FakeModel(i) for i in ips]

    def fake_fixed_ip_get_by_address(context, address):
        # Returns None (implicitly) when the address is unknown.
        ips = filter(lambda i: i['address'] == address,
                     fixed_ips)
        if ips:
            return FakeModel(ips[0])

    def fake_fixed_ip_get_network(context, address):
        # Resolve the address to its owning network row, if any.
        ips = filter(lambda i: i['address'] == address,
                     fixed_ips)
        if ips:
            nets = filter(lambda n: n['id'] == ips[0]['network_id'],
                          networks)
            if nets:
                return FakeModel(nets[0])

    def fake_fixed_ip_update(context, address, values):
        # Apply updates to the matched row; when the virtual interface
        # changes, also refresh the embedded FakeModel of the vif.
        # NOTE(review): the vif is written onto fixed_ip_fields (the
        # template) rather than ips[0]; these are the same dict for the
        # single seeded row, but would diverge for created rows — TODO
        # confirm intent.
        ips = filter(lambda i: i['address'] == address,
                     fixed_ips)
        if ips:
            for key in values:
                ips[0][key] = values[key]
                if key == 'virtual_interface_id':
                    vif = filter(lambda x: x['id'] == values[key],
                                 virtual_interfacees)
                    if not vif:
                        continue
                    fixed_ip_fields['virtual_interface'] = FakeModel(vif[0])

    def fake_instance_type_get_by_id(context, id):
        # Only one flavor exists in the fake database.
        if flavor_fields['id'] == id:
            return FakeModel(flavor_fields)

    def fake_virtual_interface_create(context, values):
        # New row = template copy + next free id + caller's overrides.
        vif = dict(virtual_interface_fields)
        vif['id'] = max([m['id'] for m in virtual_interfacees] or [-1]) + 1
        for key in values:
            vif[key] = values[key]
        return FakeModel(vif)

    def fake_virtual_interface_delete_by_instance(context, instance_id):
        addresses = [m for m in virtual_interfacees \
                     if m['instance_id'] == instance_id]
        try:
            for address in addresses:
                virtual_interfacees.remove(address)
        except ValueError:
            pass

    def fake_virtual_interface_get_by_instance(context, instance_id):
        return [FakeModel(m) for m in virtual_interfacees \
                if m['instance_id'] == instance_id]

    def fake_virtual_interface_get_by_instance_and_network(context,
                                                           instance_id,
                                                           network_id):
        vif = filter(lambda m: m['instance_id'] == instance_id and \
                               m['network_id'] == network_id,
                     virtual_interfacees)
        if not vif:
            return None
        return FakeModel(vif[0])

    def fake_network_create_safe(context, values):
        # New row = template copy + next free id + caller's overrides.
        net = dict(network_fields)
        net['id'] = max([n['id'] for n in networks] or [-1]) + 1
        for key in values:
            net[key] = values[key]
        return FakeModel(net)

    def fake_network_get(context, network_id):
        net = filter(lambda n: n['id'] == network_id, networks)
        if not net:
            return None
        return FakeModel(net[0])

    def fake_network_get_all(context):
        return [FakeModel(n) for n in networks]

    def fake_network_get_all_by_host(context, host):
        nets = filter(lambda n: n['host'] == host, networks)
        return [FakeModel(n) for n in nets]

    def fake_network_get_all_by_instance(context, instance_id):
        # NOTE(review): network rows have no 'instance_id' key, so this
        # raises KeyError if ever called with a non-empty table — TODO
        # confirm whether any test exercises it.
        nets = filter(lambda n: n['instance_id'] == instance_id, networks)
        return [FakeModel(n) for n in nets]

    def fake_network_set_host(context, network_id, host_id):
        nets = filter(lambda n: n['id'] == network_id, networks)
        for net in nets:
            net['host'] = host_id
        return host_id

    def fake_network_update(context, network_id, values):
        nets = filter(lambda n: n['id'] == network_id, networks)
        for net in nets:
            for key in values:
                net[key] = values[key]

    def fake_project_get_networks(context, project_id):
        return [FakeModel(n) for n in networks \
                if n['project_id'] == project_id]

    def fake_queue_get_for(context, topic, node):
        # Mirrors the real queue naming scheme: "<topic>.<node>".
        return "%s.%s" % (topic, node)

    # Install every fake over its db.<name> counterpart (the 'fake_'
    # prefix is stripped by stub_out).
    funcs = [fake_floating_ip_allocate_address,
             fake_floating_ip_deallocate,
             fake_floating_ip_disassociate,
             fake_floating_ip_fixed_ip_associate,
             fake_floating_ip_get_all_by_host,
             fake_floating_ip_get_by_address,
             fake_floating_ip_set_auto_assigned,
             fake_fixed_ip_associate,
             fake_fixed_ip_associate_pool,
             fake_fixed_ip_create,
             fake_fixed_ip_disassociate,
             fake_fixed_ip_disassociate_all_by_timeout,
             fake_fixed_ip_get_by_instance,
             fake_fixed_ip_get_by_address,
             fake_fixed_ip_get_network,
             fake_fixed_ip_update,
             fake_instance_type_get_by_id,
             fake_virtual_interface_create,
             fake_virtual_interface_delete_by_instance,
             fake_virtual_interface_get_by_instance,
             fake_virtual_interface_get_by_instance_and_network,
             fake_network_create_safe,
             fake_network_get,
             fake_network_get_all,
             fake_network_get_all_by_host,
             fake_network_get_all_by_instance,
             fake_network_set_host,
             fake_network_update,
             fake_project_get_networks,
             fake_queue_get_for]
    stub_out(stubs, funcs)
def stub_out_db_instance_api(stubs, injected=True):
"""Stubs out the db API for creating Instances."""
@@ -92,20 +409,6 @@ def stub_out_db_instance_api(stubs, injected=True):
'address_v6': 'fe80::a00:3',
'network_id': 'fake_flat'}
class FakeModel(object):
"""Stubs out for model."""
def __init__(self, values):
self.values = values
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
if key in self.values:
return self.values[key]
else:
raise NotImplementedError()
def fake_instance_type_get_all(context, inactive=0):
return INSTANCE_TYPES
@@ -132,26 +435,22 @@ def stub_out_db_instance_api(stubs, injected=True):
else:
return [FakeModel(flat_network_fields)]
def fake_instance_get_fixed_address(context, instance_id):
return FakeModel(fixed_ip_fields).address
def fake_instance_get_fixed_addresses(context, instance_id):
return [FakeModel(fixed_ip_fields).address]
def fake_instance_get_fixed_address_v6(context, instance_id):
return FakeModel(fixed_ip_fields).address
def fake_instance_get_fixed_addresses_v6(context, instance_id):
return [FakeModel(fixed_ip_fields).address]
def fake_fixed_ip_get_all_by_instance(context, instance_id):
def fake_fixed_ip_get_by_instance(context, instance_id):
return [FakeModel(fixed_ip_fields)]
stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
stubs.Set(db, 'network_get_all_by_instance',
fake_network_get_all_by_instance)
stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
stubs.Set(db, 'instance_type_get_by_id', fake_instance_type_get_by_id)
stubs.Set(db, 'instance_get_fixed_address',
fake_instance_get_fixed_address)
stubs.Set(db, 'instance_get_fixed_address_v6',
fake_instance_get_fixed_address_v6)
stubs.Set(db, 'network_get_all_by_instance',
fake_network_get_all_by_instance)
stubs.Set(db, 'fixed_ip_get_all_by_instance',
fake_fixed_ip_get_all_by_instance)
funcs = [fake_network_get_by_instance,
fake_network_get_all_by_instance,
fake_instance_type_get_all,
fake_instance_type_get_by_name,
fake_instance_type_get_by_id,
fake_instance_get_fixed_addresses,
fake_instance_get_fixed_addresses_v6,
fake_network_get_all_by_instance,
fake_fixed_ip_get_by_instance]
stub_out(stubs, funcs)

Some files were not shown because too many files have changed in this diff Show More