Merged with trunk

This commit is contained in:
Justin Santa Barbara
2010-08-18 22:14:24 +01:00
83 changed files with 3365 additions and 1312 deletions
+2 -4
View File
@@ -29,8 +29,6 @@ from nova import flags
from nova import rpc
from nova import server
from nova import utils
from nova.auth import manager
from nova.compute import model
from nova.endpoint import admin
from nova.endpoint import api
from nova.endpoint import cloud
@@ -39,10 +37,10 @@ FLAGS = flags.FLAGS
def main(_argv):
"""Load the controllers and start the tornado I/O loop."""
controllers = {
'Cloud': cloud.CloudController(),
'Admin': admin.AdminController()
}
'Admin': admin.AdminController()}
_app = api.APIServerApplication(controllers)
conn = rpc.Connection.instance()
+34
View File
@@ -0,0 +1,34 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Nova API daemon.
"""
from nova import api
from nova import flags
from nova import utils
from nova import wsgi

# Global flag registry shared by all Nova services.
FLAGS = flags.FLAGS
# TCP port the API WSGI server listens on.
flags.DEFINE_integer('api_port', 8773, 'API port')

if __name__ == '__main__':
    # Load flags from the default flag file before serving requests.
    utils.default_flagfile()
    # Serve the top-level API router on the configured port (blocks).
    wsgi.run_server(api.API(), FLAGS.api_port)
+25 -18
View File
@@ -18,8 +18,6 @@
# under the License.
"""
nova-dhcpbridge
Handle lease database updates from DHCP servers.
"""
@@ -42,34 +40,42 @@ from nova.network import service
FLAGS = flags.FLAGS
def add_lease(mac, ip, hostname, interface):
def add_lease(_mac, ip, _hostname, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
service.VlanNetworkService().lease_ip(ip)
else:
rpc.cast("%s.%s" (FLAGS.network_topic, FLAGS.node_name),
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
{"method": "lease_ip",
"args" : {"fixed_ip": ip}})
"args": {"fixed_ip": ip}})
def old_lease(mac, ip, hostname, interface):
def old_lease(_mac, _ip, _hostname, _interface):
"""Do nothing, just an old lease update."""
logging.debug("Adopted old lease or got a change of mac/hostname")
def del_lease(mac, ip, hostname, interface):
def del_lease(_mac, ip, _hostname, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
service.VlanNetworkService().release_ip(ip)
else:
rpc.cast("%s.%s" (FLAGS.network_topic, FLAGS.node_name),
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
{"method": "release_ip",
"args" : {"fixed_ip": ip}})
"args": {"fixed_ip": ip}})
def init_leases(interface):
"""Get the list of hosts for an interface."""
net = model.get_network_by_interface(interface)
res = ""
for host_name in net.hosts:
res += "%s\n" % linux_net.hostDHCP(net, host_name, net.hosts[host_name])
for address in net.assigned_objs:
res += "%s\n" % linux_net.host_dhcp(address)
return res
def main():
"""Parse environment and arguments and call the approproate action."""
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
utils.default_flagfile(flagfile)
argv = FLAGS(sys.argv)
@@ -79,18 +85,19 @@ def main():
FLAGS.redis_db = 8
FLAGS.network_size = 32
FLAGS.connection_type = 'fake'
FLAGS.fake_network=True
FLAGS.auth_driver='nova.auth.ldapdriver.FakeLdapDriver'
FLAGS.fake_network = True
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
action = argv[1]
if action in ['add','del','old']:
if action in ['add', 'del', 'old']:
mac = argv[2]
ip = argv[3]
hostname = argv[4]
logging.debug("Called %s for mac %s with ip %s and hostname %s on interface %s" % (action, mac, ip, hostname, interface))
globals()[action+'_lease'](mac, ip, hostname, interface)
logging.debug("Called %s for mac %s with ip %s and "
"hostname %s on interface %s",
action, mac, ip, hostname, interface)
globals()[action + '_lease'](mac, ip, hostname, interface)
else:
print init_leases(interface)
exit(0)
if __name__ == "__main__":
sys.exit(main())
main()
+15 -16
View File
@@ -37,20 +37,17 @@ FLAGS = flags.FLAGS
api_url = 'https://imagestore.canonical.com/api/dashboard'
image_cache = None
def images():
global image_cache
if not image_cache:
try:
images = json.load(urllib2.urlopen(api_url))['images']
image_cache = [i for i in images if i['title'].find('amd64') > -1]
except Exception:
print 'unable to download canonical image list'
sys.exit(1)
return image_cache
# FIXME(ja): add checksum/signature checks
def get_images():
"""Get a list of the images from the imagestore URL."""
images = json.load(urllib2.urlopen(api_url))['images']
images = [img for img in images if img['title'].find('amd64') > -1]
return images
def download(img):
"""Download an image to the local filesystem."""
# FIXME(ja): add checksum/signature checks
tempdir = tempfile.mkdtemp(prefix='cis-')
kernel_id = None
@@ -79,20 +76,22 @@ def download(img):
shutil.rmtree(tempdir)
def main():
"""Main entry point."""
utils.default_flagfile()
argv = FLAGS(sys.argv)
images = get_images()
if len(argv) == 2:
for img in images():
for img in images:
if argv[1] == 'all' or argv[1] == img['title']:
download(img)
else:
print 'usage: %s (title|all)'
print 'available images:'
for image in images():
print image['title']
for img in images:
print img['title']
if __name__ == '__main__':
main()
+5 -13
View File
@@ -22,7 +22,6 @@
"""
import logging
from twisted.internet import task
from twisted.application import service
from nova import twistd
@@ -30,7 +29,11 @@ from nova.compute import monitor
logging.getLogger('boto').setLevel(logging.WARN)
def main():
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
logging.warn('Starting instance monitor')
m = monitor.InstanceMonitor()
@@ -38,14 +41,3 @@ def main():
# parses this file, return it so that we can get it into globals below
application = service.Application('nova-instancemonitor')
m.setServiceParent(application)
return application
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
application = main()
+78 -63
View File
@@ -37,12 +37,15 @@ FLAGS = flags.FLAGS
class VpnCommands(object):
"""Class for managing VPNs."""
def __init__(self):
self.manager = manager.AuthManager()
self.instdir = model.InstanceDirectory()
self.pipe = pipelib.CloudPipe(cloud.CloudController())
def list(self):
"""Print a listing of the VPNs for all projects."""
print "%-12s\t" % 'project',
print "%-12s\t" % 'ip:port',
print "%s" % 'state'
@@ -50,11 +53,11 @@ class VpnCommands(object):
print "%-12s\t" % project.name,
print "%s:%s\t" % (project.vpn_ip, project.vpn_port),
vpn = self.__vpn_for(project.id)
vpn = self._vpn_for(project.id)
if vpn:
out, err = utils.execute(
"ping -c1 -w1 %s > /dev/null; echo $?"
% vpn['private_dns_name'], check_exit_code=False)
command = "ping -c1 -w1 %s > /dev/null; echo $?"
out, _err = utils.execute( command % vpn['private_dns_name'],
check_exit_code=False)
if out.strip() == '0':
net = 'up'
else:
@@ -68,25 +71,32 @@ class VpnCommands(object):
else:
print None
def __vpn_for(self, project_id):
def _vpn_for(self, project_id):
"""Get the VPN instance for a project ID."""
for instance in self.instdir.all:
if (instance.state.has_key('image_id')
if ('image_id' in instance.state
and instance['image_id'] == FLAGS.vpn_image_id
and not instance['state_description'] in ['shutting_down', 'shutdown']
and not instance['state_description'] in
['shutting_down', 'shutdown']
and instance['project_id'] == project_id):
return instance
def spawn(self):
"""Run all VPNs."""
for p in reversed(self.manager.get_projects()):
if not self.__vpn_for(p.id):
print 'spawning %s' % p.id
self.pipe.launch_vpn_instance(p.id)
time.sleep(10)
if not self._vpn_for(p.id):
print 'spawning %s' % p.id
self.pipe.launch_vpn_instance(p.id)
time.sleep(10)
def run(self, project_id):
"""Start the VPN for a given project."""
self.pipe.launch_vpn_instance(project_id)
class RoleCommands(object):
"""Class for managing roles."""
def __init__(self):
self.manager = manager.AuthManager()
@@ -109,25 +119,24 @@ class RoleCommands(object):
arguments: user, role [project]"""
self.manager.remove_role(user, role, project)
class UserCommands(object):
"""Class for managing users."""
def __init__(self):
self.manager = manager.AuthManager()
def __print_export(self, user):
print 'export EC2_ACCESS_KEY=%s' % user.access
print 'export EC2_SECRET_KEY=%s' % user.secret
def admin(self, name, access=None, secret=None):
"""creates a new admin and prints exports
arguments: name [access] [secret]"""
user = self.manager.create_user(name, access, secret, True)
self.__print_export(user)
print_export(user)
def create(self, name, access=None, secret=None):
"""creates a new user and prints exports
arguments: name [access] [secret]"""
user = self.manager.create_user(name, access, secret, False)
self.__print_export(user)
print_export(user)
def delete(self, name):
"""deletes an existing user
@@ -139,7 +148,7 @@ class UserCommands(object):
arguments: name"""
user = self.manager.get_user(name)
if user:
self.__print_export(user)
print_export(user)
else:
print "User %s doesn't exist" % name
@@ -149,53 +158,58 @@ class UserCommands(object):
for user in self.manager.get_users():
print user.name
def print_export(user):
"""Print export variables to use with API."""
print 'export EC2_ACCESS_KEY=%s' % user.access
print 'export EC2_SECRET_KEY=%s' % user.secret
class ProjectCommands(object):
"""Class for managing projects."""
def __init__(self):
self.manager = manager.AuthManager()
def add(self, project, user):
"""adds user to project
"""Adds user to project
arguments: project user"""
self.manager.add_to_project(user, project)
def create(self, name, project_manager, description=None):
"""creates a new project
"""Creates a new project
arguments: name project_manager [description]"""
user = self.manager.create_project(name, project_manager, description)
self.manager.create_project(name, project_manager, description)
def delete(self, name):
"""deletes an existing project
"""Deletes an existing project
arguments: name"""
self.manager.delete_project(name)
def environment(self, project_id, user_id, filename='novarc'):
"""exports environment variables to an sourcable file
"""Exports environment variables to an sourcable file
arguments: project_id user_id [filename='novarc]"""
rc = self.manager.get_environment_rc(project_id, user_id)
with open(filename, 'w') as f:
f.write(rc)
def list(self):
"""lists all projects
"""Lists all projects
arguments: <none>"""
for project in self.manager.get_projects():
print project.name
def remove(self, project, user):
"""removes user from project
"""Removes user from project
arguments: project user"""
self.manager.remove_from_project(user, project)
def zip(self, project_id, user_id, filename='nova.zip'):
"""exports credentials for project to a zip file
def zipfile(self, project_id, user_id, filename='nova.zip'):
"""Exports credentials for project to a zip file
arguments: project_id user_id [filename='nova.zip]"""
zip = self.manager.get_credentials(project_id, user_id)
zip_file = self.manager.get_credentials(user_id, project_id)
with open(filename, 'w') as f:
f.write(zip)
def usage(script_name):
print script_name + " category action [<args>]"
f.write(zip_file)
categories = [
@@ -207,62 +221,61 @@ categories = [
def lazy_match(name, key_value_tuples):
"""finds all objects that have a key that case insensitively contains [name]
key_value_tuples is a list of tuples of the form (key, value)
"""Finds all objects that have a key that case insensitively contains
[name] key_value_tuples is a list of tuples of the form (key, value)
returns a list of tuples of the form (key, value)"""
return [(k, v) for (k, v) in key_value_tuples if k.lower().find(name.lower()) == 0]
result = []
for (k, v) in key_value_tuples:
if k.lower().find(name.lower()) == 0:
result.append((k, v))
if len(result) == 0:
print "%s does not match any options:" % name
for k, _v in key_value_tuples:
print "\t%s" % k
sys.exit(2)
if len(result) > 1:
print "%s matched multiple options:" % name
for k, _v in result:
print "\t%s" % k
sys.exit(2)
return result
def methods_of(obj):
"""get all callable methods of an object that don't start with underscore
"""Get all callable methods of an object that don't start with underscore
returns a list of tuples of the form (method_name, method)"""
return [(i, getattr(obj, i)) for i in dir(obj) if callable(getattr(obj, i)) and not i.startswith('_')]
result = []
for i in dir(obj):
if callable(getattr(obj, i)) and not i.startswith('_'):
result.append((i, getattr(obj, i)))
return result
if __name__ == '__main__':
def main():
"""Parse options and call the appropriate class/method."""
utils.default_flagfile('/etc/nova/nova-manage.conf')
argv = FLAGS(sys.argv)
script_name = argv.pop(0)
if len(argv) < 1:
usage(script_name)
print script_name + " category action [<args>]"
print "Available categories:"
for k, v in categories:
for k, _ in categories:
print "\t%s" % k
sys.exit(2)
category = argv.pop(0)
matches = lazy_match(category, categories)
if len(matches) == 0:
print "%s does not match any categories:" % category
for k, v in categories:
print "\t%s" % k
sys.exit(2)
if len(matches) > 1:
print "%s matched multiple categories:" % category
for k, v in matches:
print "\t%s" % k
sys.exit(2)
# instantiate the command group object
category, fn = matches[0]
command_object = fn()
actions = methods_of(command_object)
if len(argv) < 1:
usage(script_name)
print script_name + " category action [<args>]"
print "Available actions for %s category:" % category
for k, v in actions:
for k, _v in actions:
print "\t%s" % k
sys.exit(2)
action = argv.pop(0)
matches = lazy_match(action, actions)
if len(matches) == 0:
print "%s does not match any actions" % action
for k, v in actions:
print "\t%s" % k
sys.exit(2)
if len(matches) > 1:
print "%s matched multiple actions:" % action
for k, v in matches:
print "\t%s" % k
sys.exit(2)
action, fn = matches[0]
# call the action with the remaining arguments
try:
@@ -273,3 +286,5 @@ if __name__ == '__main__':
print "%s %s: %s" % (category, action, fn.__doc__)
sys.exit(2)
if __name__ == '__main__':
main()
+1 -7
View File
@@ -30,15 +30,9 @@ from nova.objectstore import handler
FLAGS = flags.FLAGS
def main():
app = handler.get_application()
print app
return app
# NOTE(soren): Stolen from nova-compute
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
utils.default_flagfile()
application = main()
application = handler.get_application()
-58
View File
@@ -1,58 +0,0 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WSGI daemon for the main API endpoint.
"""
import logging
from tornado import ioloop
from wsgiref import simple_server
from nova import flags
from nova import rpc
from nova import server
from nova import utils
from nova.auth import manager
from nova.endpoint import rackspace
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
def main(_argv):
user_manager = manager.AuthManager()
api_instance = rackspace.Api(user_manager)
conn = rpc.Connection.instance()
rpc_consumer = rpc.AdapterConsumer(connection=conn,
topic=FLAGS.cloud_topic,
proxy=api_instance)
# TODO: fire rpc response listener (without attach to tornado)
# io_inst = ioloop.IOLoop.instance()
# _injected = consumer.attach_to_tornado(io_inst)
http_server = simple_server.WSGIServer(('0.0.0.0', FLAGS.cc_port), simple_server.WSGIRequestHandler)
http_server.set_app(api_instance.handler)
logging.debug('Started HTTP server on port %i' % FLAGS.cc_port)
while True:
http_server.handle_request()
# io_inst.start()
if __name__ == '__main__':
utils.default_flagfile()
server.serve('nova-rsapi', main)
+1
View File
@@ -40,6 +40,7 @@ Python libraries we don't vendor
* M2Crypto: python library interface for openssl
* curl
* XenAPI: Needed only for Xen Cloud Platform or XenServer support. Available from http://wiki.xensource.com/xenwiki/XCP_SDK or http://community.citrix.com/cdn/xs/sdks.
Vendored python libraries (don't require any installation)
+47 -1
View File
@@ -20,6 +20,7 @@ Nova User API client library.
"""
import base64
import boto
from boto.ec2.regioninfo import RegionInfo
@@ -57,6 +58,30 @@ class UserInfo(object):
elif name == 'secretkey':
self.secretkey = str(value)
class UserRole(object):
    """SAX-parsed description of a single Nova user role.

    Only one field is expected from the XML stream:
        role -- the role name
    """

    def __init__(self, connection=None):
        self.connection = connection
        self.role = None

    def __repr__(self):
        return 'UserRole:' + str(self.role)

    def startElement(self, name, attrs, connection):
        """No nested elements are expected; nothing to do."""
        return None

    def endElement(self, name, value, connection):
        """Capture the role name; stash any other element as a string."""
        if name != 'role':
            setattr(self, name, str(value))
        else:
            self.role = value
class ProjectInfo(object):
"""
Information about a Nova project, as parsed through SAX
@@ -92,12 +117,14 @@ class ProjectInfo(object):
else:
setattr(self, name, str(value))
class ProjectMember(object):
"""
Information about a Nova project member, as parsed through SAX.
Fields include:
memberId
"""
def __init__(self, connection=None):
self.connection = connection
self.memberId = None
@@ -113,8 +140,8 @@ class ProjectMember(object):
self.memberId = value
else:
setattr(self, name, str(value))
class HostInfo(object):
"""
Information about a Nova Host, as parsed through SAX:
@@ -142,6 +169,7 @@ class HostInfo(object):
def endElement(self, name, value, connection):
setattr(self, name, value)
class NovaAdminClient(object):
def __init__(self, clc_ip='127.0.0.1', region='nova', access_key='admin',
secret_key='admin', **kwargs):
@@ -196,6 +224,24 @@ class NovaAdminClient(object):
""" deletes a user """
return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo)
def get_roles(self, project_roles=True):
"""Returns a list of available roles."""
return self.apiconn.get_list('DescribeRoles',
{'ProjectRoles': project_roles},
[('item', UserRole)])
def get_user_roles(self, user, project=None):
"""Returns a list of roles for the given user.
Omitting project will return any global roles that the user has.
Specifying project will return only project specific roles.
"""
params = {'User':user}
if project:
params['Project'] = project
return self.apiconn.get_list('DescribeUserRoles',
params,
[('item', UserRole)])
def add_user_role(self, user, role, project=None):
"""
Add a role to a user either globally or for a specific project.
+16 -18
View File
@@ -16,25 +16,23 @@
# License for the specific language governing permissions and limitations
# under the License.
'''
Utility methods for working with WSGI servers
'''
"""
Root WSGI middleware for all API controllers.
"""
class Util(object):
import routes
@staticmethod
def route(reqstr, controllers):
if len(reqstr) == 0:
return Util.select_root_controller(controllers), []
parts = [x for x in reqstr.split("/") if len(x) > 0]
if len(parts) == 0:
return Util.select_root_controller(controllers), []
return controllers[parts[0]], parts[1:]
from nova import wsgi
from nova.api import ec2
from nova.api import rackspace
@staticmethod
def select_root_controller(controllers):
if '' in controllers:
return controllers['']
else:
return None
class API(wsgi.Router):
"""Routes top-level requests to the appropriate controller."""
def __init__(self):
mapper = routes.Mapper()
mapper.connect(None, "/v1.0/{path_info:.*}",
controller=rackspace.API())
mapper.connect(None, "/ec2/{path_info:.*}", controller=ec2.API())
super(API, self).__init__(mapper)
+42
View File
@@ -0,0 +1,42 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for EC2 API controllers.
"""
import routes
import webob.dec
from nova import wsgi
class API(wsgi.Router):
    """Routes EC2 requests to the appropriate controller."""

    def __init__(self):
        # Catch-all route: every request path is sent to the dummy
        # controller until the real EC2 controllers are wired up.
        mapper = routes.Mapper()
        mapper.connect(None, "{all:.*}", controller=self.dummy)
        super(API, self).__init__(mapper)

    @staticmethod
    @webob.dec.wsgify
    def dummy(req):
        """Temporary dummy controller."""
        # wsgify turns the returned string into the HTTP response body.
        msg = "dummy response -- please hook up __init__() to cloud.py instead"
        return repr({'dummy': msg,
                     'kwargs': repr(req.environ['wsgiorg.routing_args'][1])})
+81
View File
@@ -0,0 +1,81 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for Rackspace API controllers.
"""
import json
import time
import routes
import webob.dec
import webob.exc
from nova import flags
from nova import wsgi
from nova.api.rackspace import flavors
from nova.api.rackspace import images
from nova.api.rackspace import servers
from nova.api.rackspace import sharedipgroups
from nova.auth import manager
class API(wsgi.Middleware):
    """WSGI entry point for all Rackspace API requests.

    Wraps the URL router in the auth middleware so every request is
    authorized before being routed.
    """

    def __init__(self):
        app = AuthMiddleware(APIRouter())
        super(API, self).__init__(app)
class AuthMiddleware(wsgi.Middleware):
    """Authorize the rackspace API request or return an HTTP Forbidden.

    Resolves the user from the X-Auth-Token header; requests without a
    resolvable user are rejected with 403 before reaching the router.
    """

    #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced
    #with correct RS API auth?
    @webob.dec.wsgify
    def __call__(self, req):
        context = {}
        if "HTTP_X_AUTH_TOKEN" in req.environ:
            user = manager.AuthManager().get_user_from_access_key(
                req.environ['HTTP_X_AUTH_TOKEN'])
            # Bug fix: only record the user when the lookup succeeded.
            # Previously a failed lookup stored None under 'user', which
            # made the membership check below pass and let an
            # unauthenticated request through.
            if user:
                context['user'] = user
                # NOTE(review): project is looked up by the user's name --
                # confirm projects are really keyed that way.
                context['project'] = manager.AuthManager().get_project(
                    user.name)
        if "user" not in context:
            return webob.exc.HTTPForbidden()
        req.environ['nova.context'] = context
        # Delegate to the wrapped application (the API router).
        return self.application
class APIRouter(wsgi.Router):
    """
    Routes requests on the Rackspace API to the appropriate controller
    and method.
    """

    def __init__(self):
        mapper = routes.Mapper()
        # RESTful resource routes: Routes generates index/show/create/
        # update/delete mappings for each top-level collection.
        mapper.resource("server", "servers", controller=servers.Controller())
        mapper.resource("image", "images", controller=images.Controller())
        mapper.resource("flavor", "flavors", controller=flavors.Controller())
        mapper.resource("sharedipgroup", "sharedipgroups",
                        controller=sharedipgroups.Controller())
        super(APIRouter, self).__init__(mapper)
+30
View File
@@ -0,0 +1,30 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import wsgi
class Controller(wsgi.Controller):
    """Base controller for all Rackspace API controllers.

    TODO(eday): What is this for? Is this just Rackspace specific?
    """

    @classmethod
    def render(cls, instance):
        """Render an entity, or a list of entities, as a plain dict.

        A list is wrapped under the subclass's ``entity_name`` key with
        each element rendered individually; a single entity is currently
        a stub.
        """
        if isinstance(instance, list):
            # Bug fix: render each element -- passing the whole list back
            # into render() recursed forever.
            return {cls.entity_name: [cls.render(inst) for inst in instance]}
        else:
            return {"TODO": "TODO"}
+18
View File
@@ -0,0 +1,18 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class Controller(object):
    """Placeholder controller; no endpoints are implemented yet."""
    pass
+18
View File
@@ -0,0 +1,18 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class Controller(object):
    """Placeholder controller; no endpoints are implemented yet."""
    pass
+83
View File
@@ -0,0 +1,83 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import rpc
from nova.compute import model as compute
from nova.api.rackspace import base
class Controller(base.Controller):
    """Rackspace API controller for server (instance) resources."""

    entity_name = 'servers'

    def index(self, **kwargs):
        """Return details for every instance in the datastore."""
        instances = []
        for inst in compute.InstanceDirectory().all:
            # NOTE(review): instance_details is not defined in this module;
            # confirm where it is expected to come from.
            instances.append(instance_details(inst))
        # Bug fix: the collected list was previously built and then dropped,
        # so index() always returned None.
        return instances

    def show(self, **kwargs):
        """Return the instance with the given id."""
        instance_id = kwargs['id']
        return compute.InstanceDirectory().get(instance_id)

    def delete(self, **kwargs):
        """Destroy the instance with the given id.

        Raises ServerNotFound if no such instance exists.
        """
        instance_id = kwargs['id']
        instance = compute.InstanceDirectory().get(instance_id)
        if not instance:
            # NOTE(review): ServerNotFound is not imported/defined in this
            # module -- confirm its source.
            raise ServerNotFound("The requested server was not found")
        instance.destroy()
        return True

    def create(self, **kwargs):
        """Build a new instance and cast a run_instance message for it."""
        inst = self.build_server_instance(kwargs['server'])
        # NOTE(review): FLAGS is not imported in this module -- verify.
        rpc.cast(
            FLAGS.compute_topic, {
                "method": "run_instance",
                "args": {"instance_id": inst.instance_id}})

    def update(self, **kwargs):
        """Apply the given attributes to an existing instance and save it.

        Raises ServerNotFound if no such instance exists.
        """
        instance_id = kwargs['id']
        instance = compute.InstanceDirectory().get(instance_id)
        if not instance:
            raise ServerNotFound("The requested server was not found")
        instance.update(kwargs['server'])
        instance.save()

    def build_server_instance(self, env):
        """Build instance data structure and save it to the data store."""
        # NOTE(review): utils, time, network, self.instdir and self.network
        # are not defined/imported in this module -- confirm their source.
        # Also: create() passes kwargs['server'] as env, yet this method
        # indexes env['server'] and env['user'] -- one of the two callsites
        # looks wrong; verify the expected payload shape.
        reservation = utils.generate_uid('r')
        ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
        inst = self.instdir.new()
        inst['name'] = env['server']['name']
        inst['image_id'] = env['server']['imageId']
        inst['instance_type'] = env['server']['flavorId']
        inst['user_id'] = env['user']['id']
        inst['project_id'] = env['project']['id']
        inst['reservation_id'] = reservation
        inst['launch_time'] = ltime
        inst['mac_address'] = utils.generate_mac()
        address = self.network.allocate_ip(
            inst['user_id'],
            inst['project_id'],
            mac=inst['mac_address'])
        inst['private_dns_name'] = str(address)
        inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
            inst['user_id'],
            inst['project_id'],
            'default')['bridge_name']
        # key_data, key_name, ami_launch_index
        # TODO(todd): key data or root password
        inst.save()
        return inst
+18
View File
@@ -0,0 +1,18 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class Controller(object):
    """Placeholder controller; no endpoints are implemented yet."""
    pass
-1
View File
@@ -219,7 +219,6 @@ class FakeLDAP(object):
raise NO_SUCH_OBJECT()
return objects
@property
def __redis_prefix(self):
return 'ldap:'
+29 -7
View File
@@ -30,6 +30,7 @@ import sys
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('ldap_url', 'ldap://localhost',
'Point this at your ldap server')
@@ -182,7 +183,8 @@ class LdapDriver(object):
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.NotFound("Project can't be created "
"because user %s doesn't exist" % member_uid)
"because user %s doesn't exist"
% member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
if not manager_dn in members:
@@ -236,6 +238,26 @@ class LdapDriver(object):
role_dn = self.__role_to_dn(role, project_id)
return self.__remove_from_group(uid, role_dn)
def get_user_roles(self, uid, project_id=None):
"""Retrieve list of roles for user (or user and project)"""
if project_id is None:
# NOTE(vish): This is unneccesarily slow, but since we can't
# guarantee that the global roles are located
# together in the ldap tree, we're doing this version.
roles = []
for role in FLAGS.allowed_roles:
role_dn = self.__role_to_dn(role)
if self.__is_in_group(uid, role_dn):
roles.append(role)
return roles
else:
project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
roles = self.__find_objects(project_dn,
'(&(&(objectclass=groupOfNames)'
'(!(objectclass=novaProject)))'
'(member=%s))' % self.__uid_to_dn(uid))
return [role['cn'][0] for role in roles]
def delete_user(self, uid):
"""Delete a user"""
if not self.__user_exists(uid):
@@ -253,24 +275,24 @@ class LdapDriver(object):
self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
FLAGS.ldap_user_subtree))
def delete_project(self, name):
def delete_project(self, project_id):
"""Delete a project"""
project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree)
project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
def __user_exists(self, name):
def __user_exists(self, uid):
"""Check if user exists"""
return self.get_user(name) != None
return self.get_user(uid) != None
def __key_pair_exists(self, uid, key_name):
"""Check if key pair exists"""
return self.get_user(uid) != None
return self.get_key_pair(uid, key_name) != None
def __project_exists(self, name):
def __project_exists(self, project_id):
"""Check if project exists"""
return self.get_project(name) != None
return self.get_project(project_id) != None
def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
+33 -9
View File
@@ -29,16 +29,17 @@ import uuid
import zipfile
from nova import crypto
from nova import datastore
from nova import exception
from nova import flags
from nova import objectstore # for flags
from nova import utils
from nova.auth import ldapdriver # for flags
from nova.auth import signer
from nova.network import vpn
FLAGS = flags.FLAGS
flags.DEFINE_list('allowed_roles',
['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'],
'Allowed roles for project')
# NOTE(vish): a user with one of these roles will be a superuser and
# have access to all api commands
@@ -50,7 +51,6 @@ flags.DEFINE_list('superuser_roles', ['cloudadmin'],
flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
'Roles that apply to all projects')
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
@@ -65,15 +65,14 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
'Driver that auth manager uses')
class AuthBase(object):
"""Base class for objects relating to auth
@@ -81,6 +80,7 @@ class AuthBase(object):
an id member. They may optionally contain methods that delegate to
AuthManager, but should not implement logic themselves.
"""
@classmethod
def safe_id(cls, obj):
"""Safe get object id
@@ -98,7 +98,9 @@ class AuthBase(object):
class User(AuthBase):
"""Object representing a user"""
def __init__(self, id, name, access, secret, admin):
AuthBase.__init__(self)
self.id = id
self.name = name
self.access = access
@@ -158,7 +160,9 @@ class KeyPair(AuthBase):
Even though this object is named KeyPair, only the public key and
fingerprint is stored. The user's private key is not saved.
"""
def __init__(self, id, name, owner_id, public_key, fingerprint):
AuthBase.__init__(self)
self.id = id
self.name = name
self.owner_id = owner_id
@@ -175,7 +179,9 @@ class KeyPair(AuthBase):
class Project(AuthBase):
"""Represents a Project returned from the datastore"""
def __init__(self, id, name, project_manager_id, description, member_ids):
AuthBase.__init__(self)
self.id = id
self.name = name
self.project_manager_id = project_manager_id
@@ -222,7 +228,6 @@ class Project(AuthBase):
self.member_ids)
class AuthManager(object):
"""Manager Singleton for dealing with Users, Projects, and Keypairs
@@ -234,7 +239,9 @@ class AuthManager(object):
AuthManager also manages associated data related to Auth objects that
need to be more accessible, such as vpn ips and ports.
"""
_instance=None
_instance = None
def __new__(cls, *args, **kwargs):
"""Returns the AuthManager singleton"""
if not cls._instance:
@@ -248,7 +255,7 @@ class AuthManager(object):
reset the driver if it is not set or a new driver is specified.
"""
if driver or not getattr(self, 'driver', None):
self.driver = utils.import_class(driver or FLAGS.auth_driver)
self.driver = utils.import_class(driver or FLAGS.auth_driver)
def authenticate(self, access, signature, params, verb='GET',
server_string='127.0.0.1:8773', path='/',
@@ -431,6 +438,10 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to add local role.
"""
if role not in FLAGS.allowed_roles:
raise exception.NotFound("The %s role can not be found" % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound("The %s role is global only" % role)
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
@@ -454,6 +465,19 @@ class AuthManager(object):
with self.driver() as drv:
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
def get_roles(self, project_roles=True):
"""Get list of allowed roles"""
if project_roles:
return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles))
else:
return FLAGS.allowed_roles
def get_user_roles(self, user, project=None):
"""Get user global or per-project roles"""
with self.driver() as drv:
return drv.get_user_roles(User.safe_id(user),
Project.safe_id(project))
def get_project(self, pid):
"""Get project object by id"""
with self.driver() as drv:
+2
View File
@@ -32,6 +32,7 @@ def allow(*roles):
return wrapped_f
return wrap
def deny(*roles):
def wrap(f):
def wrapped_f(self, context, *args, **kwargs):
@@ -44,6 +45,7 @@ def deny(*roles):
return wrapped_f
return wrap
def __matches_role(context, role):
if role == 'all':
return True
+7 -3
View File
@@ -48,11 +48,15 @@ import hashlib
import hmac
import logging
import urllib
import boto # NOTE(vish): for new boto
import boto.utils # NOTE(vish): for old boto
# NOTE(vish): for new boto
import boto
# NOTE(vish): for old boto
import boto.utils
from nova.exception import Error
class Signer(object):
""" hacked up code from boto/connection.py """
@@ -77,7 +81,6 @@ class Signer(object):
return self._calc_signature_2(params, verb, server_string, path)
raise Error('Unknown Signature Version: %s' % self.SignatureVersion)
def _get_utf8_value(self, value):
if not isinstance(value, str) and not isinstance(value, unicode):
value = str(value)
@@ -133,5 +136,6 @@ class Signer(object):
logging.debug('base64 encoded digest: %s' % b64)
return b64
if __name__ == '__main__':
print Signer('foo').generate({"SignatureMethod": 'HmacSHA256', 'SignatureVersion': '2'}, "get", "server", "/foo")
+2 -1
View File
@@ -21,9 +21,10 @@ Tornado REST API Request Handlers for CloudPipe
"""
import logging
import tornado.web
import urllib
import tornado.web
from nova import crypto
from nova.auth import manager
+1 -1
View File
@@ -36,11 +36,11 @@ from nova.endpoint import api
FLAGS = flags.FLAGS
flags.DEFINE_string('boot_script_template',
utils.abspath('cloudpipe/bootscript.sh'),
'Template for script to run on cloudpipe instance boot')
class CloudPipe(object):
def __init__(self, cloud_controller):
self.controller = cloud_controller
+4
View File
@@ -24,6 +24,7 @@ Includes injection of SSH PGP keys into authorized_keys file.
import logging
import os
import tempfile
from twisted.internet import defer
from nova import exception
@@ -84,6 +85,7 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
yield execute('dd if=%s of=%s bs=%d seek=%d conv=notrunc,fsync'
% (infile, outfile, sector_size, primary_first))
@defer.inlineCallbacks
def inject_data(image, key=None, net=None, partition=None, execute=None):
"""Injects a ssh key and optionally net data into a disk image.
@@ -137,6 +139,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
# remove loopback
yield execute('sudo losetup -d %s' % device)
@defer.inlineCallbacks
def _inject_key_into_fs(key, fs, execute=None):
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
@@ -146,6 +149,7 @@ def _inject_key_into_fs(key, fs, execute=None):
keyfile = os.path.join(sshdir, 'authorized_keys')
yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
@defer.inlineCallbacks
def _inject_net_into_fs(net, fs, execute=None):
netfile = os.path.join(os.path.join(os.path.join(
+2
View File
@@ -168,6 +168,7 @@ class Instance(datastore.BasicModel):
self.unassociate_with("ip", self.state['private_dns_name'])
return super(Instance, self).destroy()
class Host(datastore.BasicModel):
"""A Host is the machine where a Daemon is running."""
@@ -235,6 +236,7 @@ class Daemon(datastore.BasicModel):
for x in cls.associated_to("host", hostname):
yield x
class SessionToken(datastore.BasicModel):
"""This is a short-lived auth token that is passed through web requests"""
+21 -14
View File
@@ -24,14 +24,15 @@ Instance Monitoring:
in the object store.
"""
import boto
import boto.s3
import datetime
import logging
import os
import rrdtool
import sys
import time
import boto
import boto.s3
import rrdtool
from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
@@ -41,13 +42,12 @@ from nova.virt import connection as virt_connection
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'monitoring_instances_delay', 5, 'Sleep time between updates')
flags.DEFINE_integer(
'monitoring_instances_step', 300, 'Interval of RRD updates')
flags.DEFINE_string(
'monitoring_rrd_path', '/var/nova/monitor/instances',
'Location of RRD files')
flags.DEFINE_integer('monitoring_instances_delay', 5,
'Sleep time between updates')
flags.DEFINE_integer('monitoring_instances_step', 300,
'Interval of RRD updates')
flags.DEFINE_string('monitoring_rrd_path', '/var/nova/monitor/instances',
'Location of RRD files')
RRD_VALUES = {
@@ -61,7 +61,7 @@ RRD_VALUES = {
'RRA:MAX:0.5:6:800',
'RRA:MAX:0.5:24:800',
'RRA:MAX:0.5:288:800',
],
],
'net': [
'DS:rx:COUNTER:600:0:1250000',
'DS:tx:COUNTER:600:0:1250000',
@@ -73,7 +73,7 @@ RRD_VALUES = {
'RRA:MAX:0.5:6:800',
'RRA:MAX:0.5:24:800',
'RRA:MAX:0.5:288:800',
],
],
'disk': [
'DS:rd:COUNTER:600:U:U',
'DS:wr:COUNTER:600:U:U',
@@ -85,12 +85,13 @@ RRD_VALUES = {
'RRA:MAX:0.5:6:800',
'RRA:MAX:0.5:24:800',
'RRA:MAX:0.5:444:800',
]
}
]
}
utcnow = datetime.datetime.utcnow
def update_rrd(instance, name, data):
"""
Updates the specified RRD file.
@@ -106,6 +107,7 @@ def update_rrd(instance, name, data):
'%d:%s' % (timestamp, data)
)
def init_rrd(instance, name):
"""
Initializes the specified RRD file.
@@ -124,6 +126,7 @@ def init_rrd(instance, name):
'--start', '0',
*RRD_VALUES[name]
)
def graph_cpu(instance, duration):
"""
@@ -148,6 +151,7 @@ def graph_cpu(instance, duration):
store_graph(instance.instance_id, filename)
def graph_net(instance, duration):
"""
Creates a graph of network usage for the specified instance and duration.
@@ -174,6 +178,7 @@ def graph_net(instance, duration):
)
store_graph(instance.instance_id, filename)
def graph_disk(instance, duration):
"""
@@ -202,6 +207,7 @@ def graph_disk(instance, duration):
store_graph(instance.instance_id, filename)
def store_graph(instance_id, filename):
"""
Transmits the specified graph file to internal object store on cloud
@@ -387,6 +393,7 @@ class InstanceMonitor(object, service.Service):
"""
Monitors the running instances of the current machine.
"""
def __init__(self):
"""
Initialize the monitoring loop.
+1
View File
@@ -29,6 +29,7 @@ import json
import logging
import os
import sys
from twisted.internet import defer
from twisted.internet import task
+7 -1
View File
@@ -24,7 +24,6 @@ SSH keypairs and x509 certificates.
import base64
import hashlib
import logging
import M2Crypto
import os
import shutil
import struct
@@ -32,6 +31,8 @@ import tempfile
import time
import utils
import M2Crypto
from nova import exception
from nova import flags
@@ -42,11 +43,13 @@ flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our ke
flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?')
def ca_path(project_id):
if project_id:
return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id)
return "%s/cacert.pem" % (FLAGS.ca_path)
def fetch_ca(project_id=None, chain=True):
if not FLAGS.use_intermediate_ca:
project_id = None
@@ -60,6 +63,7 @@ def fetch_ca(project_id=None, chain=True):
buffer += cafile.read()
return buffer
def generate_key_pair(bits=1024):
# what is the magic 65537?
@@ -109,6 +113,7 @@ def generate_x509_cert(subject, bits=1024):
shutil.rmtree(tmpdir)
return (private_key, csr)
def sign_csr(csr_text, intermediate=None):
if not FLAGS.use_intermediate_ca:
intermediate = None
@@ -122,6 +127,7 @@ def sign_csr(csr_text, intermediate=None):
os.chdir(start)
return _sign_csr(csr_text, user_ca)
def _sign_csr(csr_text, ca_folder):
tmpfolder = tempfile.mkdtemp()
csrfile = open("%s/inbound.csr" % (tmpfolder), "w")
+11 -4
View File
@@ -124,12 +124,16 @@ class BasicModel(object):
yield cls(identifier)
@classmethod
@absorb_connection_error
def associated_to(cls, foreign_type, foreign_id):
redis_set = cls._redis_association_name(foreign_type, foreign_id)
for identifier in Redis.instance().smembers(redis_set):
for identifier in cls.associated_keys(foreign_type, foreign_id):
yield cls(identifier)
@classmethod
@absorb_connection_error
def associated_keys(cls, foreign_type, foreign_id):
redis_set = cls._redis_association_name(foreign_type, foreign_id)
return Redis.instance().smembers(redis_set) or []
@classmethod
def _redis_set_name(cls, kls_name):
# stupidly pluralize (for compatibility with previous codebase)
@@ -138,7 +142,7 @@ class BasicModel(object):
@classmethod
def _redis_association_name(cls, foreign_type, foreign_id):
return cls._redis_set_name("%s:%s:%s" %
(foreign_type, foreign_id, cls.__name__))
(foreign_type, foreign_id, cls._redis_name()))
@property
def identifier(self):
@@ -170,6 +174,9 @@ class BasicModel(object):
def setdefault(self, item, default):
return self.state.setdefault(item, default)
def __contains__(self, item):
return item in self.state
def __getitem__(self, item):
return self.state[item]
-32
View File
@@ -1,32 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nova.endpoint` -- Main NOVA Api endpoints
=====================================================
.. automodule:: nova.endpoint
:platform: Unix
:synopsis: REST APIs for all nova functions
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
+19
View File
@@ -37,6 +37,7 @@ def user_dict(user, base64_file=None):
else:
return {}
def project_dict(project):
"""Convert the project object to a result dict"""
if project:
@@ -47,6 +48,7 @@ def project_dict(project):
else:
return {}
def host_dict(host):
"""Convert a host model object to a result dict"""
if host:
@@ -54,6 +56,7 @@ def host_dict(host):
else:
return {}
def admin_only(target):
"""Decorator for admin-only API calls"""
def wrapper(*args, **kwargs):
@@ -66,6 +69,7 @@ def admin_only(target):
return wrapper
class AdminController(object):
"""
API Controller for users, hosts, nodes, and workers.
@@ -102,6 +106,21 @@ class AdminController(object):
return True
@admin_only
def describe_roles(self, context, project_roles=True, **kwargs):
"""Returns a list of allowed roles."""
roles = manager.AuthManager().get_roles(project_roles)
return { 'roles': [{'role': r} for r in roles]}
@admin_only
def describe_user_roles(self, context, user, project=None, **kwargs):
"""Returns a list of roles for the given user.
Omitting project will return any global roles that the user has.
Specifying project will return only project specific roles.
"""
roles = manager.AuthManager().get_user_roles(user, project=project)
return { 'roles': [{'role': r} for r in roles]}
@admin_only
def modify_user_role(self, context, user, role, project=None,
operation='add', **kwargs):
+5 -2
View File
@@ -25,12 +25,13 @@ import logging
import multiprocessing
import random
import re
import tornado.web
from twisted.internet import defer
import urllib
# TODO(termie): replace minidom with etree
from xml.dom import minidom
import tornado.web
from twisted.internet import defer
from nova import crypto
from nova import exception
from nova import flags
@@ -43,6 +44,7 @@ from nova.endpoint import cloud
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
_log = logging.getLogger("api")
_log.setLevel(logging.DEBUG)
@@ -227,6 +229,7 @@ class MetadataRequestHandler(tornado.web.RequestHandler):
self.print_data(data)
self.finish()
class APIRequestHandler(tornado.web.RequestHandler):
def get(self, controller_name):
self.execute(controller_name)
+85 -75
View File
@@ -26,6 +26,7 @@ import base64
import logging
import os
import time
from twisted.internet import defer
from nova import datastore
@@ -44,9 +45,9 @@ from nova.volume import service
FLAGS = flags.FLAGS
flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
def _gen_key(user_id, key_name):
""" Tuck this into AuthManager """
try:
@@ -85,7 +86,7 @@ class CloudController(object):
""" Ensure the keychains and folders exist. """
# Create keys folder, if it doesn't exist
if not os.path.exists(FLAGS.keys_path):
os.makedirs(os.path.abspath(FLAGS.keys_path))
os.makedirs(FLAGS.keys_path)
# Gen root CA, if we don't have one
root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file)
if not os.path.exists(root_ca_path):
@@ -102,15 +103,16 @@ class CloudController(object):
result = {}
for instance in self.instdir.all:
if instance['project_id'] == project_id:
line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus'])
line = '%s slots=%d' % (instance['private_dns_name'],
INSTANCE_TYPES[instance['instance_type']]['vcpus'])
if instance['key_name'] in result:
result[instance['key_name']].append(line)
else:
result[instance['key_name']] = [line]
return result
def get_metadata(self, ip):
i = self.get_instance_by_ip(ip)
def get_metadata(self, ipaddress):
i = self.get_instance_by_ip(ipaddress)
if i is None:
return None
mpi = self._get_mpi_data(i['project_id'])
@@ -123,6 +125,12 @@ class CloudController(object):
}
else:
keys = ''
address_record = network_model.FixedIp(i['private_dns_name'])
if address_record:
hostname = address_record['hostname']
else:
hostname = 'ip-%s' % i['private_dns_name'].replace('.', '-')
data = {
'user-data': base64.b64decode(i['user_data']),
'meta-data': {
@@ -135,19 +143,19 @@ class CloudController(object):
'root': '/dev/sda1',
'swap': 'sda3'
},
'hostname': i['private_dns_name'], # is this public sometimes?
'hostname': hostname,
'instance-action': 'none',
'instance-id': i['instance_id'],
'instance-type': i.get('instance_type', ''),
'local-hostname': i['private_dns_name'],
'local-hostname': hostname,
'local-ipv4': i['private_dns_name'], # TODO: switch to IP
'kernel-id': i.get('kernel_id', ''),
'placement': {
'availaibility-zone': i.get('availability_zone', 'nova'),
},
'public-hostname': i.get('dns_name', ''),
'public-hostname': hostname,
'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP
'public-keys' : keys,
'public-keys': keys,
'ramdisk-id': i.get('ramdisk_id', ''),
'reservation-id': i['reservation_id'],
'security-groups': i.get('groups', ''),
@@ -203,26 +211,22 @@ class CloudController(object):
'keyFingerprint': key_pair.fingerprint,
})
return { 'keypairsSet': result }
return {'keypairsSet': result}
@rbac.allow('all')
def create_key_pair(self, context, key_name, **kwargs):
try:
d = defer.Deferred()
p = context.handler.application.settings.get('pool')
def _complete(kwargs):
if 'exception' in kwargs:
d.errback(kwargs['exception'])
return
d.callback({'keyName': key_name,
'keyFingerprint': kwargs['fingerprint'],
'keyMaterial': kwargs['private_key']})
p.apply_async(_gen_key, [context.user.id, key_name],
callback=_complete)
return d
except manager.UserError as e:
raise
dcall = defer.Deferred()
pool = context.handler.application.settings.get('pool')
def _complete(kwargs):
if 'exception' in kwargs:
dcall.errback(kwargs['exception'])
return
dcall.callback({'keyName': key_name,
'keyFingerprint': kwargs['fingerprint'],
'keyMaterial': kwargs['private_key']})
pool.apply_async(_gen_key, [context.user.id, key_name],
callback=_complete)
return dcall
@rbac.allow('all')
def delete_key_pair(self, context, key_name, **kwargs):
@@ -232,7 +236,7 @@ class CloudController(object):
@rbac.allow('all')
def describe_security_groups(self, context, group_names, **kwargs):
groups = { 'securityGroupSet': [] }
groups = {'securityGroupSet': []}
# Stubbed for now to unblock other things.
return groups
@@ -251,7 +255,7 @@ class CloudController(object):
instance = self._get_instance(context, instance_id[0])
return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "get_console_output",
"args" : {"instance_id": instance_id[0]}})
"args": {"instance_id": instance_id[0]}})
def _get_user_id(self, context):
if context and context.user:
@@ -285,10 +289,10 @@ class CloudController(object):
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': volume['delete_on_termination'],
'device' : volume['mountpoint'],
'instanceId' : volume['instance_id'],
'status' : 'attached',
'volume_id' : volume['volume_id']}]
'device': volume['mountpoint'],
'instanceId': volume['instance_id'],
'status': 'attached',
'volume_id': volume['volume_id']}]
else:
v['attachmentSet'] = [{}]
return v
@@ -298,16 +302,16 @@ class CloudController(object):
def create_volume(self, context, size, **kwargs):
# TODO(vish): refactor this to create the volume object here and tell service to create it
result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume",
"args" : {"size": size,
"args": {"size": size,
"user_id": context.user.id,
"project_id": context.project.id}})
# NOTE(vish): rpc returned value is in the result key in the dictionary
volume = self._get_volume(context, result['result'])
volume = self._get_volume(context, result)
defer.returnValue({'volumeSet': [self.format_volume(context, volume)]})
def _get_address(self, context, public_ip):
# FIXME(vish) this should move into network.py
address = network_model.PublicAddress.lookup(public_ip)
address = network_model.ElasticIp.lookup(public_ip)
if address and (context.user.is_admin() or address['project_id'] == context.project.id):
return address
raise exception.NotFound("Address at ip %s not found" % public_ip)
@@ -348,16 +352,15 @@ class CloudController(object):
compute_node = instance['node_name']
rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
{"method": "attach_volume",
"args" : {"volume_id": volume_id,
"instance_id" : instance_id,
"mountpoint" : device}})
return defer.succeed({'attachTime' : volume['attach_time'],
'device' : volume['mountpoint'],
'instanceId' : instance_id,
'requestId' : context.request_id,
'status' : volume['attach_status'],
'volumeId' : volume_id})
"args": {"volume_id": volume_id,
"instance_id": instance_id,
"mountpoint": device}})
return defer.succeed({'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': instance_id,
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': volume_id})
@rbac.allow('projectmanager', 'sysadmin')
def detach_volume(self, context, volume_id, **kwargs):
@@ -372,18 +375,18 @@ class CloudController(object):
instance = self._get_instance(context, instance_id)
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "detach_volume",
"args" : {"instance_id": instance_id,
"args": {"instance_id": instance_id,
"volume_id": volume_id}})
except exception.NotFound:
# If the instance doesn't exist anymore,
# then we need to call detach blind
volume.finish_detach()
return defer.succeed({'attachTime' : volume['attach_time'],
'device' : volume['mountpoint'],
'instanceId' : instance_id,
'requestId' : context.request_id,
'status' : volume['attach_status'],
'volumeId' : volume_id})
return defer.succeed({'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': instance_id,
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': volume_id})
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
@@ -394,7 +397,15 @@ class CloudController(object):
@rbac.allow('all')
def describe_instances(self, context, **kwargs):
return defer.succeed(self._format_instances(context))
return defer.succeed(self._format_describe_instances(context))
def _format_describe_instances(self, context):
return { 'reservationSet': self._format_instances(context) }
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id)
assert len(i) == 1
return i[0]
def _format_instances(self, context, reservation_id = None):
reservations = {}
@@ -425,7 +436,8 @@ class CloudController(object):
i['key_name'] = instance.get('key_name', None)
if context.user.is_admin():
i['key_name'] = '%s (%s, %s)' % (i['key_name'],
instance.get('project_id', None), instance.get('node_name',''))
instance.get('project_id', None),
instance.get('node_name', ''))
i['product_codes_set'] = self._convert_to_set(
instance.get('product_codes', None), 'product_code')
i['instance_type'] = instance.get('instance_type', None)
@@ -442,8 +454,7 @@ class CloudController(object):
reservations[res_id] = r
reservations[res_id]['instances_set'].append(i)
instance_response = {'reservationSet' : list(reservations.values()) }
return instance_response
return list(reservations.values())
@rbac.allow('all')
def describe_addresses(self, context, **kwargs):
@@ -451,13 +462,13 @@ class CloudController(object):
def format_addresses(self, context):
addresses = []
for address in network_model.PublicAddress.all():
for address in network_model.ElasticIp.all():
# TODO(vish): implement a by_project iterator for addresses
if (context.user.is_admin() or
address['project_id'] == context.project.id):
address_rv = {
'public_ip': address['address'],
'instance_id' : address.get('instance_id', 'free')
'instance_id': address.get('instance_id', 'free')
}
if context.user.is_admin():
address_rv['instance_id'] = "%s (%s, %s)" % (
@@ -472,12 +483,11 @@ class CloudController(object):
@defer.inlineCallbacks
def allocate_address(self, context, **kwargs):
network_topic = yield self._get_network_topic(context)
alloc_result = yield rpc.call(network_topic,
public_ip = yield rpc.call(network_topic,
{"method": "allocate_elastic_ip",
"args": {"user_id": context.user.id,
"project_id": context.project.id}})
public_ip = alloc_result['result']
defer.returnValue({'addressSet': [{'publicIp' : public_ip}]})
defer.returnValue({'addressSet': [{'publicIp': public_ip}]})
@rbac.allow('netadmin')
@defer.inlineCallbacks
@@ -517,11 +527,10 @@ class CloudController(object):
"""Retrieves the network host for a project"""
host = network_service.get_host_for_project(context.project.id)
if not host:
result = yield rpc.call(FLAGS.network_topic,
host = yield rpc.call(FLAGS.network_topic,
{"method": "set_network_host",
"args": {"user_id": context.user.id,
"project_id": context.project.id}})
host = result['result']
defer.returnValue('%s.%s' %(FLAGS.network_topic, host))
@rbac.allow('projectmanager', 'sysadmin')
@@ -561,17 +570,17 @@ class CloudController(object):
# TODO: Get the real security group of launch in here
security_group = "default"
for num in range(int(kwargs['max_count'])):
vpn = False
is_vpn = False
if image_id == FLAGS.vpn_image_id:
vpn = True
allocate_result = yield rpc.call(network_topic,
is_vpn = True
inst = self.instdir.new()
allocate_data = yield rpc.call(network_topic,
{"method": "allocate_fixed_ip",
"args": {"user_id": context.user.id,
"project_id": context.project.id,
"security_group": security_group,
"vpn": vpn}})
allocate_data = allocate_result['result']
inst = self.instdir.new()
"is_vpn": is_vpn,
"hostname": inst.instance_id}})
inst['image_id'] = image_id
inst['kernel_id'] = kernel_id
inst['ramdisk_id'] = ramdisk_id
@@ -585,17 +594,18 @@ class CloudController(object):
inst['project_id'] = context.project.id
inst['ami_launch_index'] = num
inst['security_group'] = security_group
inst['hostname'] = inst.instance_id
for (key, value) in allocate_data.iteritems():
inst[key] = value
inst.save()
rpc.cast(FLAGS.compute_topic,
{"method": "run_instance",
"args": {"instance_id" : inst.instance_id}})
"args": {"instance_id": inst.instance_id}})
logging.debug("Casting to node for %s's instance with IP of %s" %
(context.user.name, inst['private_dns_name']))
# TODO: Make Network figure out the network name from ip.
defer.returnValue(self._format_instances(context, reservation_id))
defer.returnValue(self._format_run_instances(context, reservation_id))
@rbac.allow('projectmanager', 'sysadmin')
@defer.inlineCallbacks
@@ -646,7 +656,7 @@ class CloudController(object):
instance = self._get_instance(context, i)
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "reboot_instance",
"args" : {"instance_id": i}})
"args": {"instance_id": i}})
return defer.succeed(True)
@rbac.allow('projectmanager', 'sysadmin')
@@ -656,7 +666,7 @@ class CloudController(object):
volume_node = volume['node_name']
rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node),
{"method": "delete_volume",
"args" : {"volume_id": volume_id}})
"args": {"volume_id": volume_id}})
return defer.succeed(True)
@rbac.allow('all')
@@ -689,9 +699,9 @@ class CloudController(object):
image = images.list(context, image_id)[0]
except IndexError:
raise exception.ApiError('invalid id: %s' % image_id)
result = { 'image_id': image_id, 'launchPermission': [] }
result = {'image_id': image_id, 'launchPermission': []}
if image['isPublic']:
result['launchPermission'].append({ 'group': 'all' })
result['launchPermission'].append({'group': 'all'})
return defer.succeed(result)
@rbac.allow('projectmanager', 'sysadmin')
+6 -1
View File
@@ -21,10 +21,11 @@ Proxy AMI-related calls from the cloud controller, to the running
objectstore daemon.
"""
import boto.s3.connection
import json
import urllib
import boto.s3.connection
from nova import flags
from nova import utils
from nova.auth import manager
@@ -32,6 +33,7 @@ from nova.auth import manager
FLAGS = flags.FLAGS
def modify(context, image_id, operation):
conn(context).make_request(
method='POST',
@@ -53,6 +55,7 @@ def register(context, image_location):
return image_id
def list(context, filter_list=[]):
""" return a list of all images that a user can see
@@ -68,6 +71,7 @@ def list(context, filter_list=[]):
return [i for i in result if i['imageId'] in filter_list]
return result
def deregister(context, image_id):
""" unregister an image """
conn(context).make_request(
@@ -75,6 +79,7 @@ def deregister(context, image_id):
bucket='_images',
query_args=qs({'image_id': image_id}))
def conn(context):
access = manager.AuthManager().get_access_key(context.user,
context.project)
-226
View File
@@ -1,226 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Rackspace API
"""
import base64
import json
import logging
import multiprocessing
import os
import time
import tornado.web
from twisted.internet import defer
from nova import datastore
from nova import exception
from nova import flags
from nova import rpc
from nova import utils
from nova.auth import manager
from nova.compute import model
from nova.compute import network
from nova.endpoint import images
from nova.endpoint import wsgi
FLAGS = flags.FLAGS
flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
# TODO(todd): subclass Exception so we can bubble meaningful errors
class Api(object):
def __init__(self, rpc_mechanism):
self.controllers = {
"v1.0": RackspaceAuthenticationApi(),
"servers": RackspaceCloudServerApi()
}
self.rpc_mechanism = rpc_mechanism
def handler(self, environ, responder):
environ['nova.context'] = self.build_context(environ)
controller, path = wsgi.Util.route(
environ['PATH_INFO'],
self.controllers
)
if not controller:
# TODO(todd): Exception (404)
raise Exception("Missing Controller")
rv = controller.process(path, environ)
if type(rv) is tuple:
responder(rv[0], rv[1])
rv = rv[2]
else:
responder("200 OK", [])
return rv
def build_context(self, env):
rv = {}
if env.has_key("HTTP_X_AUTH_TOKEN"):
rv['user'] = manager.AuthManager().get_user_from_access_key(
env['HTTP_X_AUTH_TOKEN']
)
if rv['user']:
rv['project'] = manager.AuthManager().get_project(
rv['user'].name
)
return rv
class RackspaceApiEndpoint(object):
def process(self, path, env):
if not self.check_authentication(env):
# TODO(todd): Exception (Unauthorized)
raise Exception("Unable to authenticate")
if len(path) == 0:
return self.index(env)
action = path.pop(0)
if hasattr(self, action):
method = getattr(self, action)
return method(path, env)
else:
# TODO(todd): Exception (404)
raise Exception("Missing method %s" % path[0])
def check_authentication(self, env):
if hasattr(self, "process_without_authentication") \
and getattr(self, "process_without_authentication"):
return True
if not env['nova.context']['user']:
return False
return True
class RackspaceAuthenticationApi(RackspaceApiEndpoint):
def __init__(self):
self.process_without_authentication = True
# TODO(todd): make a actual session with a unique token
# just pass the auth key back through for now
def index(self, env):
response = '204 No Content'
headers = [
('X-Server-Management-Url', 'http://%s' % env['HTTP_HOST']),
('X-Storage-Url', 'http://%s' % env['HTTP_HOST']),
('X-CDN-Managment-Url', 'http://%s' % env['HTTP_HOST']),
('X-Auth-Token', env['HTTP_X_AUTH_KEY'])
]
body = ""
return (response, headers, body)
class RackspaceCloudServerApi(RackspaceApiEndpoint):
def __init__(self):
self.instdir = model.InstanceDirectory()
self.network = network.PublicNetworkController()
def index(self, env):
if env['REQUEST_METHOD'] == 'GET':
return self.detail(env)
elif env['REQUEST_METHOD'] == 'POST':
return self.launch_server(env)
def detail(self, args, env):
value = {
"servers":
[]
}
for inst in self.instdir.all:
value["servers"].append(self.instance_details(inst))
return json.dumps(value)
##
##
def launch_server(self, env):
data = json.loads(env['wsgi.input'].read(int(env['CONTENT_LENGTH'])))
inst = self.build_server_instance(data, env['nova.context'])
self.schedule_launch_of_instance(inst)
return json.dumps({"server": self.instance_details(inst)})
def instance_details(self, inst):
return {
"id": inst.get("instance_id", None),
"imageId": inst.get("image_id", None),
"flavorId": inst.get("instacne_type", None),
"hostId": inst.get("node_name", None),
"status": inst.get("state", "pending"),
"addresses": {
"public": [self.network.get_public_ip_for_instance(
inst.get("instance_id", None)
)],
"private": [inst.get("private_dns_name", None)]
},
# implemented only by Rackspace, not AWS
"name": inst.get("name", "Not-Specified"),
# not supported
"progress": "Not-Supported",
"metadata": {
"Server Label": "Not-Supported",
"Image Version": "Not-Supported"
}
}
def build_server_instance(self, env, context):
reservation = utils.generate_uid('r')
ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
inst = self.instdir.new()
inst['name'] = env['server']['name']
inst['image_id'] = env['server']['imageId']
inst['instance_type'] = env['server']['flavorId']
inst['user_id'] = context['user'].id
inst['project_id'] = context['project'].id
inst['reservation_id'] = reservation
inst['launch_time'] = ltime
inst['mac_address'] = utils.generate_mac()
address = network.allocate_ip(
inst['user_id'],
inst['project_id'],
mac=inst['mac_address']
)
inst['private_dns_name'] = str(address)
inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
inst['user_id'],
inst['project_id'],
'default' # security group
)['bridge_name']
# key_data, key_name, ami_launch_index
# TODO(todd): key data or root password
inst.save()
return inst
def schedule_launch_of_instance(self, inst):
rpc.cast(
FLAGS.compute_topic,
{
"method": "run_instance",
"args": {"instance_id": inst.instance_id}
}
)
+8
View File
@@ -25,31 +25,39 @@ import logging
import sys
import traceback
class Error(Exception):
def __init__(self, message=None):
super(Error, self).__init__(message)
class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
super(ApiError, self).__init__('%s: %s'% (code, message))
class NotFound(Error):
pass
class Duplicate(Error):
pass
class NotAuthorized(Error):
pass
class NotEmpty(Error):
pass
class Invalid(Error):
pass
def wrap_exception(f):
def _wrap(*args, **kw):
try:
+3 -2
View File
@@ -16,12 +16,13 @@
# License for the specific language governing permissions and limitations
# under the License.
""" Based a bit on the carrot.backeds.queue backend... but a lot better """
"""Based a bit on the carrot.backeds.queue backend... but a lot better."""
from carrot.backends import base
import logging
import Queue as queue
from carrot.backends import base
class Message(base.BaseMessage):
pass
+151 -28
View File
@@ -21,16 +21,145 @@ Package-level global flags are defined here, the rest are defined
where they're used.
"""
import getopt
import socket
import sys
import gflags
from gflags import *
class FlagValues(gflags.FlagValues):
"""Extension of gflags.FlagValues that allows undefined and runtime flags.
Unknown flags will be ignored when parsing the command line, but the
command line will be kept so that it can be replayed if new flags are
defined after the initial parsing.
"""
def __init__(self):
gflags.FlagValues.__init__(self)
self.__dict__['__dirty'] = []
self.__dict__['__was_already_parsed'] = False
self.__dict__['__stored_argv'] = []
def __call__(self, argv):
# We're doing some hacky stuff here so that we don't have to copy
# out all the code of the original verbatim and then tweak a few lines.
# We're hijacking the output of getopt so we can still return the
# leftover args at the end
sneaky_unparsed_args = {"value": None}
original_argv = list(argv)
if self.IsGnuGetOpt():
orig_getopt = getattr(getopt, 'gnu_getopt')
orig_name = 'gnu_getopt'
else:
orig_getopt = getattr(getopt, 'getopt')
orig_name = 'getopt'
def _sneaky(*args, **kw):
optlist, unparsed_args = orig_getopt(*args, **kw)
sneaky_unparsed_args['value'] = unparsed_args
return optlist, unparsed_args
try:
setattr(getopt, orig_name, _sneaky)
args = gflags.FlagValues.__call__(self, argv)
except gflags.UnrecognizedFlagError:
# Undefined args were found, for now we don't care so just
# act like everything went well
# (these three lines are copied pretty much verbatim from the end
# of the __call__ function we are wrapping)
unparsed_args = sneaky_unparsed_args['value']
if unparsed_args:
if self.IsGnuGetOpt():
args = argv[:1] + unparsed
else:
args = argv[:1] + original_argv[-len(unparsed_args):]
else:
args = argv[:1]
finally:
setattr(getopt, orig_name, orig_getopt)
# Store the arguments for later, we'll need them for new flags
# added at runtime
self.__dict__['__stored_argv'] = original_argv
self.__dict__['__was_already_parsed'] = True
self.ClearDirty()
return args
def SetDirty(self, name):
"""Mark a flag as dirty so that accessing it will case a reparse."""
self.__dict__['__dirty'].append(name)
def IsDirty(self, name):
return name in self.__dict__['__dirty']
def ClearDirty(self):
self.__dict__['__is_dirty'] = []
def WasAlreadyParsed(self):
return self.__dict__['__was_already_parsed']
def ParseNewFlags(self):
if '__stored_argv' not in self.__dict__:
return
new_flags = FlagValues()
for k in self.__dict__['__dirty']:
new_flags[k] = gflags.FlagValues.__getitem__(self, k)
new_flags(self.__dict__['__stored_argv'])
for k in self.__dict__['__dirty']:
setattr(self, k, getattr(new_flags, k))
self.ClearDirty()
def __setitem__(self, name, flag):
gflags.FlagValues.__setitem__(self, name, flag)
if self.WasAlreadyParsed():
self.SetDirty(name)
def __getitem__(self, name):
if self.IsDirty(name):
self.ParseNewFlags()
return gflags.FlagValues.__getitem__(self, name)
def __getattr__(self, name):
if self.IsDirty(name):
self.ParseNewFlags()
return gflags.FlagValues.__getattr__(self, name)
FLAGS = FlagValues()
def _wrapper(func):
def _wrapped(*args, **kw):
kw.setdefault('flag_values', FLAGS)
func(*args, **kw)
_wrapped.func_name = func.func_name
return _wrapped
DEFINE_string = _wrapper(gflags.DEFINE_string)
DEFINE_integer = _wrapper(gflags.DEFINE_integer)
DEFINE_bool = _wrapper(gflags.DEFINE_bool)
DEFINE_boolean = _wrapper(gflags.DEFINE_boolean)
DEFINE_float = _wrapper(gflags.DEFINE_float)
DEFINE_enum = _wrapper(gflags.DEFINE_enum)
DEFINE_list = _wrapper(gflags.DEFINE_list)
DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist)
DEFINE_multistring = _wrapper(gflags.DEFINE_multistring)
DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int)
def DECLARE(name, module_string, flag_values=FLAGS):
if module_string not in sys.modules:
__import__(module_string, globals(), locals())
if name not in flag_values:
raise gflags.UnrecognizedFlag(
"%s not defined by %s" % (name, module_string))
# This keeps pylint from barfing on the imports
FLAGS = FLAGS
DEFINE_string = DEFINE_string
DEFINE_integer = DEFINE_integer
DEFINE_bool = DEFINE_bool
# __GLOBAL FLAGS ONLY__
# Define any app-specific flags in their own files, docs at:
@@ -46,29 +175,25 @@ DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses')
DEFINE_bool('fake_network', False,
'should we use fake network devices and addresses')
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('ec2_url',
'http://127.0.0.1:8773/services/Cloud',
'Url to ec2 api server')
DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
'Url to ec2 api server')
DEFINE_string('default_image',
'ami-11111',
'default image to use, testing only')
DEFINE_string('default_kernel',
'aki-11111',
'default kernel to use, testing only')
DEFINE_string('default_ramdisk',
'ari-11111',
'default ramdisk to use, testing only')
DEFINE_string('default_instance_type',
'm1.small',
'default instance type to use, testing only')
DEFINE_string('default_image', 'ami-11111',
'default image to use, testing only')
DEFINE_string('default_kernel', 'aki-11111',
'default kernel to use, testing only')
DEFINE_string('default_ramdisk', 'ari-11111',
'default ramdisk to use, testing only')
DEFINE_string('default_instance_type', 'm1.small',
'default instance type to use, testing only')
DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
DEFINE_string('vpn_key_suffix',
@@ -78,10 +203,8 @@ DEFINE_string('vpn_key_suffix',
DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
# UNUSED
DEFINE_string('node_availability_zone',
'nova',
'availability zone of this node')
DEFINE_string('node_name',
socket.gethostname(),
'name of this node')
DEFINE_string('node_availability_zone', 'nova',
'availability zone of this node')
DEFINE_string('node_name', socket.gethostname(),
'name of this node')
+15 -7
View File
@@ -20,21 +20,29 @@
Exceptions for network errors.
"""
from nova.exception import Error
from nova import exception
class NoMoreAddresses(Error):
class NoMoreAddresses(exception.Error):
"""No More Addresses are available in the network"""
pass
class AddressNotAllocated(Error):
class AddressNotAllocated(exception.Error):
"""The specified address has not been allocated"""
pass
class AddressAlreadyAssociated(Error):
class AddressAlreadyAssociated(exception.Error):
"""The specified address has already been associated"""
pass
class AddressNotAssociated(Error):
class AddressNotAssociated(exception.Error):
"""The specified address is not associated"""
pass
class NotValidNetworkSize(Error):
pass
class NotValidNetworkSize(exception.Error):
"""The network size is not valid"""
pass
+72 -53
View File
@@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@@ -15,85 +13,102 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements vlans, bridges, and iptables rules using linux utilities.
"""
import logging
import signal
import os
import subprocess
import signal
# todo(ja): does the definition of network_path belong here?
from nova import utils
# TODO(ja): does the definition of network_path belong here?
from nova import flags
FLAGS=flags.FLAGS
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('dhcpbridge_flagfile',
'/etc/nova/nova-dhcpbridge.conf',
'location of flagfile for dhcpbridge')
def execute(cmd, addl_env=None):
"""Wrapper around utils.execute for fake_network"""
if FLAGS.fake_network:
logging.debug("FAKE NET: %s" % cmd)
logging.debug("FAKE NET: %s", cmd)
return "fake", 0
else:
return utils.execute(cmd, addl_env=addl_env)
def runthis(desc, cmd):
"""Wrapper around utils.runthis for fake_network"""
if FLAGS.fake_network:
return execute(cmd)
else:
return utils.runthis(desc,cmd)
def Popen(cmd):
if FLAGS.fake_network:
execute(' '.join(cmd))
else:
subprocess.Popen(cmd)
return utils.runthis(desc, cmd)
def device_exists(device):
(out, err) = execute("ifconfig %s" % device)
"""Check if ethernet device exists"""
(_out, err) = execute("ifconfig %s" % device)
return not err
def confirm_rule(cmd):
"""Delete and re-add iptables rule"""
execute("sudo iptables --delete %s" % (cmd))
execute("sudo iptables -I %s" % (cmd))
def remove_rule(cmd):
"""Remove iptables rule"""
execute("sudo iptables --delete %s" % (cmd))
def bind_public_ip(ip, interface):
runthis("Binding IP to interface: %s", "sudo ip addr add %s dev %s" % (ip, interface))
def unbind_public_ip(ip, interface):
runthis("Binding IP to interface: %s", "sudo ip addr del %s dev %s" % (ip, interface))
def bind_public_ip(public_ip, interface):
"""Bind ip to an interface"""
runthis("Binding IP to interface: %s",
"sudo ip addr add %s dev %s" % (public_ip, interface))
def unbind_public_ip(public_ip, interface):
"""Unbind a public ip from an interface"""
runthis("Binding IP to interface: %s",
"sudo ip addr del %s dev %s" % (public_ip, interface))
def vlan_create(net):
""" create a vlan on on a bridge device unless vlan already exists """
"""Create a vlan on on a bridge device unless vlan already exists"""
if not device_exists("vlan%s" % net['vlan']):
logging.debug("Starting VLAN inteface for %s network", (net['vlan']))
execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, net['vlan']))
execute("sudo ifconfig vlan%s up" % (net['vlan']))
def bridge_create(net):
""" create a bridge on a vlan unless it already exists """
"""Create a bridge on a vlan unless it already exists"""
if not device_exists(net['bridge_name']):
logging.debug("Starting Bridge inteface for %s network", (net['vlan']))
execute("sudo brctl addbr %s" % (net['bridge_name']))
execute("sudo brctl setfd %s 0" % (net.bridge_name))
# execute("sudo brctl setageing %s 10" % (net.bridge_name))
execute("sudo brctl stp %s off" % (net['bridge_name']))
execute("sudo brctl addif %s vlan%s" % (net['bridge_name'], net['vlan']))
execute("sudo brctl addif %s vlan%s" % (net['bridge_name'],
net['vlan']))
if net.bridge_gets_ip:
execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
(net['bridge_name'], net.gateway, net.broadcast, net.netmask))
confirm_rule("FORWARD --in-interface %s -j ACCEPT" % (net['bridge_name']))
confirm_rule("FORWARD --in-interface %s -j ACCEPT" %
(net['bridge_name']))
else:
execute("sudo ifconfig %s up" % net['bridge_name'])
def dnsmasq_cmd(net):
def _dnsmasq_cmd(net):
"""Builds dnsmasq command"""
cmd = ['sudo -E dnsmasq',
' --strict-order',
' --bind-interfaces',
@@ -101,76 +116,81 @@ def dnsmasq_cmd(net):
' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'),
' --listen-address=%s' % net.dhcp_listen_address,
' --except-interface=lo',
' --dhcp-range=%s,static,600s' % (net.dhcp_range_start),
' --dhcp-range=%s,static,120s' % net.dhcp_range_start,
' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'),
' --dhcp-script=%s' % bin_file('nova-dhcpbridge'),
' --leasefile-ro']
return ''.join(cmd)
def hostDHCP(network, host, mac):
idx = host.split(".")[-1] # Logically, the idx of instances they've launched in this net
return "%s,%s-%s-%s.novalocal,%s" % \
(mac, network['user_id'], network['vlan'], idx, host)
# todo(ja): if the system has restarted or pid numbers have wrapped
def host_dhcp(address):
"""Return a host string for an address object"""
return "%s,%s.novalocal,%s" % (address['mac'],
address['hostname'],
address.address)
# TODO(ja): if the system has restarted or pid numbers have wrapped
# then you cannot be certain that the pid refers to the
# dnsmasq. As well, sending a HUP only reloads the hostfile,
# so any configuration options (like dchp-range, vlan, ...)
# aren't reloaded
def start_dnsmasq(network):
""" (re)starts a dnsmasq server for a given network
"""(Re)starts a dnsmasq server for a given network
if a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance
"""
with open(dhcp_file(network['vlan'], 'conf'), 'w') as f:
for host_name in network.hosts:
f.write("%s\n" % hostDHCP(network, host_name, network.hosts[host_name]))
for address in network.assigned_objs:
f.write("%s\n" % host_dhcp(address))
pid = dnsmasq_pid_for(network)
# if dnsmasq is already running, then tell it to reload
if pid:
# todo(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers
# TODO(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers
# correct dnsmasq process
try:
os.kill(pid, signal.SIGHUP)
except Exception, e:
logging.debug("Hupping dnsmasq threw %s", e)
# otherwise delete the existing leases file and start dnsmasq
lease_file = dhcp_file(network['vlan'], 'leases')
if os.path.exists(lease_file):
os.unlink(lease_file)
return
except Exception as exc: # pylint: disable-msg=W0703
logging.debug("Hupping dnsmasq threw %s", exc)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
'DNSMASQ_INTERFACE': network['bridge_name']}
execute(dnsmasq_cmd(network), addl_env=env)
execute(_dnsmasq_cmd(network), addl_env=env)
def stop_dnsmasq(network):
""" stops the dnsmasq instance for a given network """
"""Stops the dnsmasq instance for a given network"""
pid = dnsmasq_pid_for(network)
if pid:
try:
os.kill(pid, signal.SIGTERM)
except Exception, e:
logging.debug("Killing dnsmasq threw %s", e)
except Exception as exc: # pylint: disable-msg=W0703
logging.debug("Killing dnsmasq threw %s", exc)
def dhcp_file(vlan, kind):
""" return path to a pid, leases or conf file for a vlan """
"""Return path to a pid, leases or conf file for a vlan"""
return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind))
def bin_file(script):
"""Return the absolute path to scipt in the bin directory"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
def dnsmasq_pid_for(network):
""" the pid for prior dnsmasq instance for a vlan,
returns None if no pid file exists
if machine has rebooted pid might be incorrect (caller should check)
def dnsmasq_pid_for(network):
"""Returns he pid for prior dnsmasq instance for a vlan
Returns None if no pid file exists
If machine has rebooted pid might be incorrect (caller should check)
"""
pid_file = dhcp_file(network['vlan'], 'pid')
@@ -178,4 +198,3 @@ def dnsmasq_pid_for(network):
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
+241 -161
View File
@@ -20,11 +20,11 @@
Model Classes for network control, including VLANs, DHCP, and IP allocation.
"""
import IPy
import logging
import os
import time
import IPy
from nova import datastore
from nova import exception as nova_exception
from nova import flags
@@ -53,11 +53,13 @@ flags.DEFINE_integer('cnt_vpn_clients', 5,
flags.DEFINE_integer('cloudpipe_start_port', 12000,
'Starting port for mapped CloudPipe external ports')
logging.getLogger().setLevel(logging.DEBUG)
class Vlan(datastore.BasicModel):
def __init__(self, project, vlan):
"""Tracks vlans assigned to project it the datastore"""
def __init__(self, project, vlan): # pylint: disable-msg=W0231
"""
Since we don't want to try and find a vlan by its identifier,
but by a project id, we don't call super-init.
@@ -67,10 +69,12 @@ class Vlan(datastore.BasicModel):
@property
def identifier(self):
"""Datastore identifier"""
return "%s:%s" % (self.project_id, self.vlan_id)
@classmethod
def create(cls, project, vlan):
"""Create a Vlan object"""
instance = cls(project, vlan)
instance.save()
return instance
@@ -78,6 +82,7 @@ class Vlan(datastore.BasicModel):
@classmethod
@datastore.absorb_connection_error
def lookup(cls, project):
"""Returns object by project if it exists in datastore or None"""
set_name = cls._redis_set_name(cls.__name__)
vlan = datastore.Redis.instance().hget(set_name, project)
if vlan:
@@ -88,20 +93,20 @@ class Vlan(datastore.BasicModel):
@classmethod
@datastore.absorb_connection_error
def dict_by_project(cls):
"""a hash of project:vlan"""
"""A hash of project:vlan"""
set_name = cls._redis_set_name(cls.__name__)
return datastore.Redis.instance().hgetall(set_name)
return datastore.Redis.instance().hgetall(set_name) or {}
@classmethod
@datastore.absorb_connection_error
def dict_by_vlan(cls):
"""a hash of vlan:project"""
"""A hash of vlan:project"""
set_name = cls._redis_set_name(cls.__name__)
rv = {}
h = datastore.Redis.instance().hgetall(set_name)
for v in h.keys():
rv[h[v]] = v
return rv
retvals = {}
hashset = datastore.Redis.instance().hgetall(set_name) or {}
for (key, val) in hashset.iteritems():
retvals[val] = key
return retvals
@classmethod
@datastore.absorb_connection_error
@@ -119,39 +124,100 @@ class Vlan(datastore.BasicModel):
default way of saving into "vlan:ID" and adding to a set of "vlans".
"""
set_name = self._redis_set_name(self.__class__.__name__)
datastore.Redis.instance().hset(set_name, self.project_id, self.vlan_id)
datastore.Redis.instance().hset(set_name,
self.project_id,
self.vlan_id)
@datastore.absorb_connection_error
def destroy(self):
"""Removes the object from the datastore"""
set_name = self._redis_set_name(self.__class__.__name__)
datastore.Redis.instance().hdel(set_name, self.project_id)
def subnet(self):
"""Returns a string containing the subnet"""
vlan = int(self.vlan_id)
network = IPy.IP(FLAGS.private_range)
start = (vlan-FLAGS.vlan_start) * FLAGS.network_size
start = (vlan - FLAGS.vlan_start) * FLAGS.network_size
# minus one for the gateway.
return "%s-%s" % (network[start],
network[start + FLAGS.network_size - 1])
# CLEANUP:
# TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients
# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win?
# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients
class BaseNetwork(datastore.BasicModel):
override_type = 'network'
NUM_STATIC_IPS = 3 # Network, Gateway, and CloudPipe
class FixedIp(datastore.BasicModel):
"""Represents a fixed ip in the datastore"""
def __init__(self, address):
self.address = address
super(FixedIp, self).__init__()
@property
def identifier(self):
return self.address
# NOTE(vish): address states allocated, leased, deallocated
def default_state(self):
return {'address': self.address,
'state': 'none'}
@classmethod
# pylint: disable-msg=R0913
def create(cls, user_id, project_id, address, mac, hostname, network_id):
"""Creates an FixedIp object"""
addr = cls(address)
addr['user_id'] = user_id
addr['project_id'] = project_id
addr['mac'] = mac
if hostname is None:
hostname = "ip-%s" % address.replace('.', '-')
addr['hostname'] = hostname
addr['network_id'] = network_id
addr['state'] = 'allocated'
addr.save()
return addr
def save(self):
is_new = self.is_new_record()
success = super(FixedIp, self).save()
if success and is_new:
self.associate_with("network", self['network_id'])
def destroy(self):
self.unassociate_with("network", self['network_id'])
super(FixedIp, self).destroy()
class ElasticIp(FixedIp):
"""Represents an elastic ip in the datastore"""
override_type = "address"
def default_state(self):
return {'address': self.address,
'instance_id': 'available',
'private_ip': 'available'}
# CLEANUP:
# TODO(ja): does vlanpool "keeper" need to know the min/max -
# shouldn't FLAGS always win?
class BaseNetwork(datastore.BasicModel):
"""Implements basic logic for allocating ips in a network"""
override_type = 'network'
address_class = FixedIp
@property
def identifier(self):
"""Datastore identifier"""
return self.network_id
def default_state(self):
"""Default values for new objects"""
return {'network_id': self.network_id, 'network_str': self.network_str}
@classmethod
# pylint: disable-msg=R0913
def create(cls, user_id, project_id, security_group, vlan, network_str):
"""Create a BaseNetwork object"""
network_id = "%s:%s" % (project_id, security_group)
net = cls(network_id, network_str)
net['user_id'] = user_id
@@ -169,91 +235,135 @@ class BaseNetwork(datastore.BasicModel):
@property
def network(self):
"""Returns a string representing the network"""
return IPy.IP(self['network_str'])
@property
def netmask(self):
"""Returns the netmask of this network"""
return self.network.netmask()
@property
def gateway(self):
"""Returns the network gateway address"""
return self.network[1]
@property
def broadcast(self):
"""Returns the network broadcast address"""
return self.network.broadcast()
@property
def bridge_name(self):
"""Returns the bridge associated with this network"""
return "br%s" % (self["vlan"])
@property
def user(self):
"""Returns the user associated with this network"""
return manager.AuthManager().get_user(self['user_id'])
@property
def project(self):
"""Returns the project associated with this network"""
return manager.AuthManager().get_project(self['project_id'])
@property
def _hosts_key(self):
return "network:%s:hosts" % (self['network_str'])
# pylint: disable-msg=R0913
def _add_host(self, user_id, project_id, ip_address, mac, hostname):
"""Add a host to the datastore"""
self.address_class.create(user_id, project_id, ip_address,
mac, hostname, self.identifier)
@property
def hosts(self):
return datastore.Redis.instance().hgetall(self._hosts_key) or {}
def _add_host(self, _user_id, _project_id, host, target):
datastore.Redis.instance().hset(self._hosts_key, host, target)
def _rem_host(self, host):
datastore.Redis.instance().hdel(self._hosts_key, host)
def _rem_host(self, ip_address):
"""Remove a host from the datastore"""
self.address_class(ip_address).destroy()
@property
def assigned(self):
return datastore.Redis.instance().hkeys(self._hosts_key)
"""Returns a list of all assigned addresses"""
return self.address_class.associated_keys('network', self.identifier)
@property
def assigned_objs(self):
"""Returns a list of all assigned addresses as objects"""
return self.address_class.associated_to('network', self.identifier)
def get_address(self, ip_address):
"""Returns a specific ip as an object"""
if ip_address in self.assigned:
return self.address_class(ip_address)
return None
@property
def available(self):
# the .2 address is always CloudPipe
# and the top <n> are for vpn clients
for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)):
"""Returns a list of all available addresses in the network"""
for idx in range(self.num_bottom_reserved_ips,
len(self.network) - self.num_top_reserved_ips):
address = str(self.network[idx])
if not address in self.hosts.keys():
if not address in self.assigned:
yield address
@property
def num_static_ips(self):
return BaseNetwork.NUM_STATIC_IPS
def num_bottom_reserved_ips(self):
"""Returns number of ips reserved at the bottom of the range"""
return 2 # Network, Gateway
def allocate_ip(self, user_id, project_id, mac):
@property
def num_top_reserved_ips(self):
"""Returns number of ips reserved at the top of the range"""
return 1 # Broadcast
def allocate_ip(self, user_id, project_id, mac, hostname=None):
"""Allocates an ip to a mac address"""
for address in self.available:
logging.debug("Allocating IP %s to %s" % (address, project_id))
self._add_host(user_id, project_id, address, mac)
logging.debug("Allocating IP %s to %s", address, project_id)
self._add_host(user_id, project_id, address, mac, hostname)
self.express(address=address)
return address
raise exception.NoMoreAddresses("Project %s with network %s" %
(project_id, str(self.network)))
(project_id, str(self.network)))
def lease_ip(self, ip_str):
logging.debug("Leasing allocated IP %s" % (ip_str))
def release_ip(self, ip_str):
"""Called when DHCP lease is activated"""
if not ip_str in self.assigned:
raise exception.AddressNotAllocated()
self.deexpress(address=ip_str)
address = self.get_address(ip_str)
if address:
logging.debug("Leasing allocated IP %s", ip_str)
address['state'] = 'leased'
address.save()
def release_ip(self, ip_str):
"""Called when DHCP lease expires
Removes the ip from the assigned list"""
if not ip_str in self.assigned:
raise exception.AddressNotAllocated()
logging.debug("Releasing IP %s", ip_str)
self._rem_host(ip_str)
self.deexpress(address=ip_str)
def deallocate_ip(self, ip_str):
# Do nothing for now, cleanup on ip release
"""Deallocates an allocated ip"""
if not ip_str in self.assigned:
raise exception.AddressNotAllocated()
address = self.get_address(ip_str)
if address:
if address['state'] != 'leased':
# NOTE(vish): address hasn't been leased, so release it
self.release_ip(ip_str)
else:
logging.debug("Deallocating allocated IP %s", ip_str)
address['state'] == 'deallocated'
address.save()
def express(self, address=None):
"""Set up network. Implemented in subclasses"""
pass
def list_addresses(self):
for address in self.hosts:
yield address
def express(self, address=None): pass
def deexpress(self, address=None): pass
def deexpress(self, address=None):
"""Tear down network. Implemented in subclasses"""
pass
class BridgedNetwork(BaseNetwork):
@@ -277,7 +387,11 @@ class BridgedNetwork(BaseNetwork):
override_type = 'network'
@classmethod
def get_network_for_project(cls, user_id, project_id, security_group):
def get_network_for_project(cls,
user_id,
project_id,
security_group='default'):
"""Returns network for a given project"""
vlan = get_vlan_for_project(project_id)
network_str = vlan.subnet()
return cls.create(user_id, project_id, security_group, vlan.vlan_id,
@@ -293,30 +407,36 @@ class BridgedNetwork(BaseNetwork):
linux_net.vlan_create(self)
linux_net.bridge_create(self)
class DHCPNetwork(BridgedNetwork):
"""
properties:
dhcp_listen_address: the ip of the gateway / dhcp host
dhcp_range_start: the first ip to give out
dhcp_range_end: the last ip to give out
"""
"""Network supporting DHCP"""
bridge_gets_ip = True
override_type = 'network'
def __init__(self, *args, **kwargs):
super(DHCPNetwork, self).__init__(*args, **kwargs)
# logging.debug("Initing DHCPNetwork object...")
self.dhcp_listen_address = self.network[1]
self.dhcp_range_start = self.network[3]
self.dhcp_range_end = self.network[-(1 + FLAGS.cnt_vpn_clients)]
try:
if not(os.path.exists(FLAGS.networks_path)):
os.makedirs(FLAGS.networks_path)
# NOTE(todd): I guess this is a lazy way to not have to check if the
# directory exists, but shouldn't we be smarter about
# telling the difference between existing directory and
# permission denied? (Errno 17 vs 13, OSError)
except Exception, err:
pass
@property
def num_bottom_reserved_ips(self):
# For cloudpipe
return super(DHCPNetwork, self).num_bottom_reserved_ips + 1
@property
def num_top_reserved_ips(self):
return super(DHCPNetwork, self).num_top_reserved_ips + \
FLAGS.cnt_vpn_clients
@property
def dhcp_listen_address(self):
"""Address where dhcp server should listen"""
return self.gateway
@property
def dhcp_range_start(self):
"""Starting address dhcp server should use"""
return self.network[self.num_bottom_reserved_ips]
def express(self, address=None):
super(DHCPNetwork, self).express(address=address)
@@ -326,20 +446,23 @@ class DHCPNetwork(BridgedNetwork):
linux_net.start_dnsmasq(self)
else:
logging.debug("Not launching dnsmasq: no hosts.")
self.express_cloudpipe()
self.express_vpn()
def allocate_vpn_ip(self, user_id, project_id, mac):
def allocate_vpn_ip(self, user_id, project_id, mac, hostname=None):
"""Allocates the reserved ip to a vpn instance"""
address = str(self.network[2])
self._add_host(user_id, project_id, address, mac)
self._add_host(user_id, project_id, address, mac, hostname)
self.express(address=address)
return address
def express_cloudpipe(self):
def express_vpn(self):
"""Sets up routing rules for vpn"""
private_ip = str(self.network[2])
linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT"
% (private_ip, ))
linux_net.confirm_rule("PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
% (self.project.vpn_ip, self.project.vpn_port, private_ip))
linux_net.confirm_rule(
"PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
% (self.project.vpn_ip, self.project.vpn_port, private_ip))
def deexpress(self, address=None):
# if this is the last address, stop dns
@@ -349,82 +472,39 @@ class DHCPNetwork(BridgedNetwork):
else:
linux_net.start_dnsmasq(self)
class PublicAddress(datastore.BasicModel):
override_type = "address"
def __init__(self, address):
self.address = address
super(PublicAddress, self).__init__()
@property
def identifier(self):
return self.address
def default_state(self):
return {'address': self.address}
@classmethod
def create(cls, user_id, project_id, address):
addr = cls(address)
addr['user_id'] = user_id
addr['project_id'] = project_id
addr['instance_id'] = 'available'
addr['private_ip'] = 'available'
addr.save()
return addr
DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)]
class PublicNetworkController(BaseNetwork):
"""Handles elastic ips"""
override_type = 'network'
address_class = ElasticIp
def __init__(self, *args, **kwargs):
network_id = "public:default"
super(PublicNetworkController, self).__init__(network_id, FLAGS.public_range)
super(PublicNetworkController, self).__init__(network_id,
FLAGS.public_range, *args, **kwargs)
self['user_id'] = "public"
self['project_id'] = "public"
self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
time.gmtime())
self["vlan"] = FLAGS.public_vlan
self.save()
self.express()
@property
def available(self):
for idx in range(2, len(self.network)-1):
address = str(self.network[idx])
if not address in self.hosts.keys():
yield address
@property
def host_objs(self):
for address in self.assigned:
yield PublicAddress(address)
def get_host(self, host):
if host in self.assigned:
return PublicAddress(host)
return None
def _add_host(self, user_id, project_id, host, _target):
datastore.Redis.instance().hset(self._hosts_key, host, project_id)
PublicAddress.create(user_id, project_id, host)
def _rem_host(self, host):
PublicAddress(host).destroy()
datastore.Redis.instance().hdel(self._hosts_key, host)
def deallocate_ip(self, ip_str):
# NOTE(vish): cleanup is now done on release by the parent class
self.release_ip(ip_str)
self.release_ip(ip_str)
def associate_address(self, public_ip, private_ip, instance_id):
"""Associates a public ip to a private ip and instance id"""
if not public_ip in self.assigned:
raise exception.AddressNotAllocated()
# TODO(joshua): Keep an index going both ways
for addr in self.host_objs:
# TODO(josh): Keep an index going both ways
for addr in self.assigned_objs:
if addr.get('private_ip', None) == private_ip:
raise exception.AddressAlreadyAssociated()
addr = self.get_host(public_ip)
addr = self.get_address(public_ip)
if addr.get('private_ip', 'available') != 'available':
raise exception.AddressAlreadyAssociated()
addr['private_ip'] = private_ip
@@ -433,9 +513,10 @@ class PublicNetworkController(BaseNetwork):
self.express(address=public_ip)
def disassociate_address(self, public_ip):
"""Disassociates a public ip with its private ip"""
if not public_ip in self.assigned:
raise exception.AddressNotAllocated()
addr = self.get_host(public_ip)
addr = self.get_address(public_ip)
if addr.get('private_ip', 'available') == 'available':
raise exception.AddressNotAssociated()
self.deexpress(address=public_ip)
@@ -444,11 +525,14 @@ class PublicNetworkController(BaseNetwork):
addr.save()
def express(self, address=None):
addresses = self.host_objs
if address:
addresses = [self.get_host(address)]
if not address in self.assigned:
raise exception.AddressNotAllocated()
addresses = [self.get_address(address)]
else:
addresses = self.assigned_objs
for addr in addresses:
if addr.get('private_ip','available') == 'available':
if addr.get('private_ip', 'available') == 'available':
continue
public_ip = addr['address']
private_ip = addr['private_ip']
@@ -457,15 +541,16 @@ class PublicNetworkController(BaseNetwork):
% (public_ip, private_ip))
linux_net.confirm_rule("POSTROUTING -t nat -s %s -j SNAT --to %s"
% (private_ip, public_ip))
# TODO: Get these from the secgroup datastore entries
# TODO(joshua): Get these from the secgroup datastore entries
linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT"
% (private_ip))
for (protocol, port) in DEFAULT_PORTS:
linux_net.confirm_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT"
% (private_ip, protocol, port))
linux_net.confirm_rule(
"FORWARD -d %s -p %s --dport %s -j ACCEPT"
% (private_ip, protocol, port))
def deexpress(self, address=None):
addr = self.get_host(address)
addr = self.get_address(address)
private_ip = addr['private_ip']
linux_net.unbind_public_ip(address, FLAGS.public_interface)
linux_net.remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s"
@@ -479,19 +564,18 @@ class PublicNetworkController(BaseNetwork):
% (private_ip, protocol, port))
# FIXME(todd): does this present a race condition, or is there some piece of
# architecture that mitigates it (only one queue listener per net)?
# FIXME(todd): does this present a race condition, or is there some
# piece of architecture that mitigates it (only one queue
# listener per net)?
def get_vlan_for_project(project_id):
"""
Allocate vlan IDs to individual users.
"""
"""Allocate vlan IDs to individual users"""
vlan = Vlan.lookup(project_id)
if vlan:
return vlan
known_vlans = Vlan.dict_by_vlan()
for vnum in range(FLAGS.vlan_start, FLAGS.vlan_end):
vstr = str(vnum)
if not known_vlans.has_key(vstr):
if not vstr in known_vlans:
return Vlan.create(project_id, vnum)
old_project_id = known_vlans[vstr]
if not manager.AuthManager().get_project(old_project_id):
@@ -515,8 +599,9 @@ def get_vlan_for_project(project_id):
return Vlan.create(project_id, vnum)
raise exception.AddressNotAllocated("Out of VLANs")
def get_project_network(project_id, security_group='default'):
""" get a project's private network, allocating one if needed """
"""Gets a project's private network, allocating one if needed"""
project = manager.AuthManager().get_project(project_id)
if not project:
raise nova_exception.NotFound("Project %s doesn't exist." % project_id)
@@ -527,28 +612,23 @@ def get_project_network(project_id, security_group='default'):
def get_network_by_address(address):
# TODO(vish): This is completely the wrong way to do this, but
# I'm getting the network binary working before I
# tackle doing this the right way.
logging.debug("Get Network By Address: %s" % address)
for project in manager.AuthManager().get_projects():
net = get_project_network(project.id)
if address in net.assigned:
logging.debug("Found %s in %s" % (address, project.id))
return net
raise exception.AddressNotAllocated()
"""Gets the network for a given private ip"""
address_record = FixedIp.lookup(address)
if not address_record:
raise exception.AddressNotAllocated()
return get_project_network(address_record['project_id'])
def get_network_by_interface(iface, security_group='default'):
"""Gets the network for a given interface"""
vlan = iface.rpartition("br")[2]
project_id = Vlan.dict_by_vlan().get(vlan)
return get_project_network(project_id, security_group)
def get_public_ip_for_instance(instance_id):
# FIXME: this should be a lookup - iteration won't scale
for address_record in PublicAddress.all():
"""Gets the public ip for a given instance"""
# FIXME(josh): this should be a lookup - iteration won't scale
for address_record in ElasticIp.all():
if address_record.get('instance_id', 'available') == instance_id:
return address_record['address']
+56 -29
View File
@@ -17,60 +17,68 @@
# under the License.
"""
Network Nodes are responsible for allocating ips and setting up network
Network Hosts are responsible for allocating ips and setting up network
"""
from nova import datastore
from nova import exception
from nova import flags
from nova import service
from nova import utils
from nova.auth import manager
from nova.exception import NotFound
from nova.network import exception
from nova.network import model
from nova.network import vpn
FLAGS = flags.FLAGS
FLAGS = flags.FLAGS
flags.DEFINE_string('network_type',
'flat',
'Service Class for Networking')
flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances')
flags.DEFINE_list('flat_network_ips',
['192.168.0.2','192.168.0.3','192.168.0.4'],
['192.168.0.2', '192.168.0.3', '192.168.0.4'],
'Available ips for simple network')
flags.DEFINE_string('flat_network_network', '192.168.0.0',
'Network for simple network')
'Network for simple network')
flags.DEFINE_string('flat_network_netmask', '255.255.255.0',
'Netmask for simple network')
'Netmask for simple network')
flags.DEFINE_string('flat_network_gateway', '192.168.0.1',
'Broadcast for simple network')
'Broadcast for simple network')
flags.DEFINE_string('flat_network_broadcast', '192.168.0.255',
'Broadcast for simple network')
'Broadcast for simple network')
flags.DEFINE_string('flat_network_dns', '8.8.4.4',
'Dns for simple network')
'Dns for simple network')
def type_to_class(network_type):
"""Convert a network_type string into an actual Python class"""
if network_type == 'flat':
return FlatNetworkService
elif network_type == 'vlan':
elif network_type == 'vlan':
return VlanNetworkService
raise NotFound("Couldn't find %s network type" % network_type)
raise exception.NotFound("Couldn't find %s network type" % network_type)
def setup_compute_network(network_type, user_id, project_id, security_group):
"""Sets up the network on a compute host"""
srv = type_to_class(network_type)
srv.setup_compute_network(network_type, user_id, project_id, security_group)
srv.setup_compute_network(network_type,
user_id,
project_id,
security_group)
def get_host_for_project(project_id):
"""Get host allocated to project from datastore"""
redis = datastore.Redis.instance()
return redis.get(_host_key(project_id))
def _host_key(project_id):
return "network_host:%s" % project_id
"""Returns redis host key for network"""
return "networkhost:%s" % project_id
class BaseNetworkService(service.Service):
@@ -80,6 +88,7 @@ class BaseNetworkService(service.Service):
"""
def __init__(self, *args, **kwargs):
self.network = model.PublicNetworkController()
super(BaseNetworkService, self).__init__(*args, **kwargs)
def set_network_host(self, user_id, project_id, *args, **kwargs):
"""Safely sets the host of the projects network"""
@@ -109,7 +118,7 @@ class BaseNetworkService(service.Service):
pass
@classmethod
def setup_compute_network(self, user_id, project_id, security_group,
def setup_compute_network(cls, user_id, project_id, security_group,
*args, **kwargs):
"""Sets up matching network for compute hosts"""
raise NotImplementedError()
@@ -138,12 +147,14 @@ class FlatNetworkService(BaseNetworkService):
"""Basic network where no vlans are used"""
@classmethod
def setup_compute_network(self, user_id, project_id, security_group,
def setup_compute_network(cls, user_id, project_id, security_group,
*args, **kwargs):
"""Network is created manually"""
pass
def allocate_fixed_ip(self, user_id, project_id,
def allocate_fixed_ip(self,
user_id,
project_id,
security_group='default',
*args, **kwargs):
"""Gets a fixed ip from the pool
@@ -152,7 +163,7 @@ class FlatNetworkService(BaseNetworkService):
"""
# NOTE(vish): Some automation could be done here. For example,
# creating the flat_network_bridge and setting up
# a gateway. This is all done manually atm
# a gateway. This is all done manually atm.
redis = datastore.Redis.instance()
if not redis.exists('ips') and not len(redis.keys('instances:*')):
for fixed_ip in FLAGS.flat_network_ips:
@@ -160,6 +171,8 @@ class FlatNetworkService(BaseNetworkService):
fixed_ip = redis.spop('ips')
if not fixed_ip:
raise exception.NoMoreAddresses()
# TODO(vish): some sort of dns handling for hostname should
# probably be done here.
return {'inject_network': True,
'network_type': FLAGS.network_type,
'mac_address': utils.generate_mac(),
@@ -175,37 +188,51 @@ class FlatNetworkService(BaseNetworkService):
"""Returns an ip to the pool"""
datastore.Redis.instance().sadd('ips', fixed_ip)
class VlanNetworkService(BaseNetworkService):
"""Vlan network with dhcp"""
# NOTE(vish): A lot of the interactions with network/model.py can be
# simplified and improved. Also there it may be useful
# to support vlans separately from dhcp, instead of having
# both of them together in this class.
def allocate_fixed_ip(self, user_id, project_id,
# pylint: disable-msg=W0221
def allocate_fixed_ip(self,
user_id,
project_id,
security_group='default',
vpn=False, *args, **kwargs):
"""Gets a fixed ip from the pool """
is_vpn=False,
hostname=None,
*args, **kwargs):
"""Gets a fixed ip from the pool"""
mac = utils.generate_mac()
net = model.get_project_network(project_id)
if vpn:
fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac)
if is_vpn:
fixed_ip = net.allocate_vpn_ip(user_id,
project_id,
mac,
hostname)
else:
fixed_ip = net.allocate_ip(user_id, project_id, mac)
fixed_ip = net.allocate_ip(user_id,
project_id,
mac,
hostname)
return {'network_type': FLAGS.network_type,
'bridge_name': net['bridge_name'],
'mac_address': mac,
'private_dns_name' : fixed_ip}
'private_dns_name': fixed_ip}
def deallocate_fixed_ip(self, fixed_ip,
*args, **kwargs):
"""Returns an ip to the pool"""
return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip)
def lease_ip(self, address):
return model.get_network_by_address(address).lease_ip(address)
def lease_ip(self, fixed_ip):
"""Called by bridge when ip is leased"""
return model.get_network_by_address(fixed_ip).lease_ip(fixed_ip)
def release_ip(self, address):
return model.get_network_by_address(address).release_ip(address)
def release_ip(self, fixed_ip):
"""Called by bridge when ip is released"""
return model.get_network_by_address(fixed_ip).release_ip(fixed_ip)
def restart_nets(self):
"""Ensure the network for each user is enabled"""
@@ -218,7 +245,7 @@ class VlanNetworkService(BaseNetworkService):
vpn.NetworkData.create(project_id)
@classmethod
def setup_compute_network(self, user_id, project_id, security_group,
def setup_compute_network(cls, user_id, project_id, security_group,
*args, **kwargs):
"""Sets up matching network for compute hosts"""
# NOTE(vish): Use BridgedNetwork instead of DHCPNetwork because
+26 -16
View File
@@ -23,9 +23,8 @@ from nova import exception
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
'Public IP for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_start_port', 1000,
@@ -33,7 +32,9 @@ flags.DEFINE_integer('vpn_start_port', 1000,
flags.DEFINE_integer('vpn_end_port', 2000,
'End port for the cloudpipe VPN servers')
class NoMorePorts(exception.Error):
"""No ports available to allocate for the given ip"""
pass
@@ -67,34 +68,44 @@ class NetworkData(datastore.BasicModel):
return network_data
@classmethod
def find_free_port_for_ip(cls, ip):
def find_free_port_for_ip(cls, vpn_ip):
"""Finds a free port for a given ip from the redis set"""
# TODO(vish): these redis commands should be generalized and
# placed into a base class. Conceptually, it is
# similar to an association, but we are just
# storing a set of values instead of keys that
# should be turned into objects.
redis = datastore.Redis.instance()
key = 'ip:%s:ports' % ip
# TODO(vish): these ports should be allocated through an admin
# command instead of a flag
if (not redis.exists(key) and
not redis.exists(cls._redis_association_name('ip', ip))):
for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
redis.sadd(key, i)
cls._ensure_set_exists(vpn_ip)
port = redis.spop(key)
port = datastore.Redis.instance().spop(cls._redis_ports_key(vpn_ip))
if not port:
raise NoMorePorts()
return port
@classmethod
def num_ports_for_ip(cls, ip):
def _redis_ports_key(cls, vpn_ip):
"""Key that ports are stored under in redis"""
return 'ip:%s:ports' % vpn_ip
@classmethod
def _ensure_set_exists(cls, vpn_ip):
"""Creates the set of ports for the ip if it doesn't already exist"""
# TODO(vish): these ports should be allocated through an admin
# command instead of a flag
redis = datastore.Redis.instance()
if (not redis.exists(cls._redis_ports_key(vpn_ip)) and
not redis.exists(cls._redis_association_name('ip', vpn_ip))):
for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
redis.sadd(cls._redis_ports_key(vpn_ip), i)
@classmethod
def num_ports_for_ip(cls, vpn_ip):
"""Calculates the number of free ports for a given ip"""
return datastore.Redis.instance().scard('ip:%s:ports' % ip)
cls._ensure_set_exists(vpn_ip)
return datastore.Redis.instance().scard('ip:%s:ports' % vpn_ip)
@property
def ip(self):
def ip(self): # pylint: disable-msg=C0103
"""The ip assigned to the project"""
return self['ip']
@@ -113,4 +124,3 @@ class NetworkData(datastore.BasicModel):
self.unassociate_with('ip', self.ip)
datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
super(NetworkData, self).destroy()
+1
View File
@@ -36,6 +36,7 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('buckets_path', utils.abspath('../buckets'),
'path to s3 buckets')
class Bucket(object):
def __init__(self, name):
self.name = name
+43 -13
View File
@@ -38,17 +38,19 @@ S3 client with this module::
"""
import datetime
import logging
import json
import logging
import multiprocessing
import os
from tornado import escape
import urllib
from twisted.application import internet, service
from twisted.web.resource import Resource
from twisted.web import server, static, error
from tornado import escape
from twisted.application import internet
from twisted.application import service
from twisted.web import error
from twisted.web import resource
from twisted.web import server
from twisted.web import static
from nova import exception
from nova import flags
@@ -60,6 +62,7 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
def render_xml(request, value):
assert isinstance(value, dict) and len(value) == 1
request.setHeader("Content-Type", "application/xml; charset=UTF-8")
@@ -72,11 +75,13 @@ def render_xml(request, value):
request.write('</' + escape.utf8(name) + '>')
request.finish()
def finish(request, content=None):
if content:
request.write(content)
request.finish()
def _render_parts(value, write_cb):
if isinstance(value, basestring):
write_cb(escape.xhtml_escape(value))
@@ -95,11 +100,13 @@ def _render_parts(value, write_cb):
else:
raise Exception("Unknown S3 value type %r", value)
def get_argument(request, key, default_value):
if key in request.args:
return request.args[key][0]
return default_value
def get_context(request):
try:
# Authorization Header format: 'AWS <access>:<secret>'
@@ -120,13 +127,14 @@ def get_context(request):
logging.debug("Authentication Failure: %s" % ex)
raise exception.NotAuthorized
class ErrorHandlingResource(Resource):
class ErrorHandlingResource(resource.Resource):
"""Maps exceptions to 404 / 401 codes. Won't work for exceptions thrown after NOT_DONE_YET is returned."""
# TODO(unassigned) (calling-all-twisted-experts): This needs to be plugged in to the right place in twisted...
# This doesn't look like it's the right place (consider exceptions in getChild; or after NOT_DONE_YET is returned
def render(self, request):
try:
return Resource.render(self, request)
return resource.Resource.render(self, request)
except exception.NotFound:
request.setResponseCode(404)
return ''
@@ -134,6 +142,7 @@ class ErrorHandlingResource(Resource):
request.setResponseCode(403)
return ''
class S3(ErrorHandlingResource):
"""Implementation of an S3-like storage server based on local files."""
def getChild(self, name, request):
@@ -154,9 +163,10 @@ class S3(ErrorHandlingResource):
}})
return server.NOT_DONE_YET
class BucketResource(ErrorHandlingResource):
def __init__(self, name):
Resource.__init__(self)
resource.Resource.__init__(self)
self.name = name
def getChild(self, name, request):
@@ -206,7 +216,7 @@ class BucketResource(ErrorHandlingResource):
class ObjectResource(ErrorHandlingResource):
def __init__(self, bucket, name):
Resource.__init__(self)
resource.Resource.__init__(self)
self.bucket = bucket
self.name = name
@@ -245,17 +255,19 @@ class ObjectResource(ErrorHandlingResource):
request.setResponseCode(204)
return ''
class ImageResource(ErrorHandlingResource):
isLeaf = True
def __init__(self, name):
Resource.__init__(self)
resource.Resource.__init__(self)
self.img = image.Image(name)
def render_GET(self, request):
return static.File(self.img.image_path, defaultType='application/octet-stream').render_GET(request)
class ImagesResource(Resource):
class ImagesResource(resource.Resource):
def getChild(self, name, request):
if name == '':
return self
@@ -269,7 +281,23 @@ class ImagesResource(Resource):
images = [i for i in image.Image.all() \
if i.is_authorized(request.context, readonly=True)]
request.write(json.dumps([i.metadata for i in images]))
# Bug #617776:
# We used to have 'type' in the image metadata, but this field
# should be called 'imageType', as per the EC2 specification.
# For compat with old metadata files we copy type to imageType if
# imageType is not present.
# For compat with euca2ools (and any other clients using the
# incorrect name) we copy imageType to type.
# imageType is primary if we end up with both in the metadata file
# (which should never happen).
def decorate(m):
if 'imageType' not in m and 'type' in m:
m[u'imageType'] = m['type']
elif 'imageType' in m:
m[u'type'] = m['imageType']
return m
request.write(json.dumps([decorate(i.metadata) for i in images]))
request.finish()
return server.NOT_DONE_YET
@@ -323,11 +351,13 @@ class ImagesResource(Resource):
request.setResponseCode(204)
return ''
def get_site():
root = S3()
site = server.Site(root)
return site
def get_application():
factory = get_site()
application = service.Application("objectstore")
+3 -2
View File
@@ -42,6 +42,7 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('images_path', utils.abspath('../images'),
'path to decrypted images')
class Image(object):
def __init__(self, image_id):
self.image_id = image_id
@@ -148,7 +149,7 @@ class Image(object):
'imageOwnerId': 'system',
'isPublic': public,
'architecture': 'x86_64',
'type': image_type,
'imageType': image_type,
'state': 'available'
}
@@ -195,7 +196,7 @@ class Image(object):
'imageOwnerId': context.project.id,
'isPublic': False, # FIXME: grab public from manifest
'architecture': 'x86_64', # FIXME: grab architecture from manifest
'type' : image_type
'imageType' : image_type
}
def write_state(state):
+2 -2
View File
@@ -23,7 +23,7 @@ Properties of an object stored within a bucket.
import os
import nova.crypto
from nova.exception import NotFound, NotAuthorized
from nova import exception
class Object(object):
@@ -33,7 +33,7 @@ class Object(object):
self.key = key
self.path = bucket._object_path(key)
if not os.path.isfile(self.path):
raise NotFound
raise exception.NotFound
def __repr__(self):
return "<Object %s/%s>" % (self.bucket, self.key)
+3
View File
@@ -22,6 +22,7 @@ Process pool, still buggy right now.
"""
import StringIO
from twisted.internet import defer
from twisted.internet import error
from twisted.internet import protocol
@@ -190,6 +191,7 @@ class ProcessPool(object):
self._pool.release()
return retval
class SharedPool(object):
_instance = None
def __init__(self):
@@ -198,5 +200,6 @@ class SharedPool(object):
def __getattr__(self, key):
return getattr(self._instance, key)
def simple_execute(cmd, **kwargs):
return SharedPool().simple_execute(cmd, **kwargs)
+114 -32
View File
@@ -21,14 +21,14 @@ AMQP-based RPC. Queues have consumers and publishers.
No fan-out support yet.
"""
from carrot import connection
from carrot import messaging
import json
import logging
import sys
import uuid
from carrot import connection as carrot_connection
from carrot import messaging
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet import task
from nova import exception
@@ -39,13 +39,15 @@ from nova import flags
FLAGS = flags.FLAGS
_log = logging.getLogger('amqplib')
_log.setLevel(logging.WARN)
LOG = logging.getLogger('amqplib')
LOG.setLevel(logging.DEBUG)
class Connection(connection.BrokerConnection):
class Connection(carrot_connection.BrokerConnection):
"""Connection instance object"""
@classmethod
def instance(cls):
"""Returns the instance"""
if not hasattr(cls, '_instance'):
params = dict(hostname=FLAGS.rabbit_host,
port=FLAGS.rabbit_port,
@@ -56,18 +58,33 @@ class Connection(connection.BrokerConnection):
if FLAGS.fake_rabbit:
params['backend_cls'] = fakerabbit.Backend
# NOTE(vish): magic is fun!
# pylint: disable-msg=W0142
cls._instance = cls(**params)
return cls._instance
@classmethod
def recreate(cls):
"""Recreates the connection instance
This is necessary to recover from some network errors/disconnects"""
del cls._instance
return cls.instance()
class Consumer(messaging.Consumer):
"""Consumer base class
Contains methods for connecting the fetch method to async loops
"""
def __init__(self, *args, **kwargs):
self.failed_connection = False
super(Consumer, self).__init__(*args, **kwargs)
# TODO(termie): it would be nice to give these some way of automatically
# cleaning up after themselves
def attach_to_tornado(self, io_inst=None):
"""Attach a callback to tornado that fires 10 times a second"""
from tornado import ioloop
if io_inst is None:
io_inst = ioloop.IOLoop.instance()
@@ -79,33 +96,44 @@ class Consumer(messaging.Consumer):
attachToTornado = attach_to_tornado
def fetch(self, *args, **kwargs):
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
"""Wraps the parent fetch with some logic for failed connections"""
# TODO(vish): the logic for failed connections and logging should be
# refactored into some sort of connection manager object
try:
if getattr(self, 'failed_connection', False):
# attempt to reconnect
if self.failed_connection:
# NOTE(vish): conn is defined in the parent class, we can
# recreate it as long as we create the backend too
# pylint: disable-msg=W0201
self.conn = Connection.recreate()
self.backend = self.conn.create_backend()
super(Consumer, self).fetch(*args, **kwargs)
if getattr(self, 'failed_connection', False):
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
if self.failed_connection:
logging.error("Reconnected to queue")
self.failed_connection = False
except Exception, ex:
if not getattr(self, 'failed_connection', False):
# NOTE(vish): This is catching all errors because we really don't
# exceptions to be logged 10 times a second if some
# persistent failure occurs.
except Exception: # pylint: disable-msg=W0703
if not self.failed_connection:
logging.exception("Failed to fetch message from queue")
self.failed_connection = True
def attach_to_twisted(self):
"""Attach a callback to twisted that fires 10 times a second"""
loop = task.LoopingCall(self.fetch, enable_callbacks=True)
loop.start(interval=0.1)
class Publisher(messaging.Publisher):
"""Publisher base class"""
pass
class TopicConsumer(Consumer):
"""Consumes messages on a specific topic"""
exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast"):
self.queue = topic
self.routing_key = topic
@@ -115,14 +143,24 @@ class TopicConsumer(Consumer):
class AdapterConsumer(TopicConsumer):
"""Calls methods on a proxy object based on method and args"""
def __init__(self, connection=None, topic="broadcast", proxy=None):
_log.debug('Initing the Adapter Consumer for %s' % (topic))
LOG.debug('Initing the Adapter Consumer for %s' % (topic))
self.proxy = proxy
super(AdapterConsumer, self).__init__(connection=connection, topic=topic)
super(AdapterConsumer, self).__init__(connection=connection,
topic=topic)
@exception.wrap_exception
def receive(self, message_data, message):
_log.debug('received %s' % (message_data))
"""Magically looks for a method on the proxy object and calls it
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
LOG.debug('received %s' % (message_data))
msg_id = message_data.pop('_msg_id', None)
method = message_data.get('method')
@@ -133,21 +171,25 @@ class AdapterConsumer(TopicConsumer):
# messages stay in the queue indefinitely, so for now
# we just log the message and send an error string
# back to the caller
_log.warn('no method for message: %s' % (message_data))
LOG.warn('no method for message: %s' % (message_data))
msg_reply(msg_id, 'No method for message: %s' % message_data)
return
node_func = getattr(self.proxy, str(method))
node_args = dict((str(k), v) for k, v in args.iteritems())
# NOTE(vish): magic is fun!
# pylint: disable-msg=W0142
d = defer.maybeDeferred(node_func, **node_args)
if msg_id:
d.addCallback(lambda rval: msg_reply(msg_id, rval))
d.addErrback(lambda e: msg_reply(msg_id, str(e)))
d.addCallback(lambda rval: msg_reply(msg_id, rval, None))
d.addErrback(lambda e: msg_reply(msg_id, None, e))
return
class TopicPublisher(Publisher):
"""Publishes messages on a specific topic"""
exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast"):
self.routing_key = topic
self.exchange = FLAGS.control_exchange
@@ -156,7 +198,9 @@ class TopicPublisher(Publisher):
class DirectConsumer(Consumer):
"""Consumes messages directly on a channel specified by msg_id"""
exchange_type = "direct"
def __init__(self, connection=None, msg_id=None):
self.queue = msg_id
self.routing_key = msg_id
@@ -166,7 +210,9 @@ class DirectConsumer(Consumer):
class DirectPublisher(Publisher):
"""Publishes messages directly on a channel specified by msg_id"""
exchange_type = "direct"
def __init__(self, connection=None, msg_id=None):
self.routing_key = msg_id
self.exchange = msg_id
@@ -174,32 +220,63 @@ class DirectPublisher(Publisher):
super(DirectPublisher, self).__init__(connection=connection)
def msg_reply(msg_id, reply):
def msg_reply(msg_id, reply=None, failure=None):
"""Sends a reply or an error on the channel signified by msg_id
failure should be a twisted failure object"""
if failure:
message = failure.getErrorMessage()
traceback = failure.getTraceback()
logging.error("Returning exception %s to caller", message)
logging.error(traceback)
failure = (failure.type.__name__, str(failure.value), traceback)
conn = Connection.instance()
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
try:
publisher.send({'result': reply})
publisher.send({'result': reply, 'failure': failure})
except TypeError:
publisher.send(
{'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems())
})
for k, v in reply.__dict__.iteritems()),
'failure': failure})
publisher.close()
class RemoteError(exception.Error):
"""Signifies that a remote class has raised an exception
Containes a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
contains all of the relevent info."""
def __init__(self, exc_type, value, traceback):
self.exc_type = exc_type
self.value = value
self.traceback = traceback
super(RemoteError, self).__init__("%s %s\n%s" % (exc_type,
value,
traceback))
def call(topic, msg):
_log.debug("Making asynchronous call...")
"""Sends a message on a topic and wait for a response"""
LOG.debug("Making asynchronous call...")
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
_log.debug("MSG_ID is %s" % (msg_id))
LOG.debug("MSG_ID is %s" % (msg_id))
conn = Connection.instance()
d = defer.Deferred()
consumer = DirectConsumer(connection=conn, msg_id=msg_id)
def deferred_receive(data, message):
"""Acks message and callbacks or errbacks"""
message.ack()
d.callback(data)
if data['failure']:
return d.errback(RemoteError(*data['failure']))
else:
return d.callback(data['result'])
consumer.register_callback(deferred_receive)
injected = consumer.attach_to_tornado()
@@ -213,7 +290,8 @@ def call(topic, msg):
def cast(topic, msg):
_log.debug("Making asynchronous cast...")
"""Sends a message on a topic without waiting for a response"""
LOG.debug("Making asynchronous cast...")
conn = Connection.instance()
publisher = TopicPublisher(connection=conn, topic=topic)
publisher.send(msg)
@@ -221,16 +299,18 @@ def cast(topic, msg):
def generic_response(message_data, message):
_log.debug('response %s', message_data)
"""Logs a result and exits"""
LOG.debug('response %s', message_data)
message.ack()
sys.exit(0)
def send_message(topic, message, wait=True):
"""Sends a message for testing"""
msg_id = uuid.uuid4().hex
message.update({'_msg_id': msg_id})
_log.debug('topic is %s', topic)
_log.debug('message %s', message)
LOG.debug('topic is %s', topic)
LOG.debug('message %s', message)
if wait:
consumer = messaging.Consumer(connection=Connection.instance(),
@@ -253,6 +333,8 @@ def send_message(topic, message, wait=True):
consumer.wait()
# TODO: Replace with a docstring test
if __name__ == "__main__":
# NOTE(vish): you can send messages from the command line using
# topic and a json sting representing a dictionary
# for the method
send_message(sys.argv[1], json.loads(sys.argv[2]))
+10 -10
View File
@@ -52,13 +52,8 @@ def stop(pidfile):
"""
# Get the pid from the pidfile
try:
pf = file(pidfile,'r')
pid = int(pf.read().strip())
pf.close()
pid = int(open(pidfile,'r').read().strip())
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % pidfile)
return # not an error in a restart
@@ -79,14 +74,15 @@ def stop(pidfile):
def serve(name, main):
"""Controller for server"""
argv = FLAGS(sys.argv)
if not FLAGS.pidfile:
FLAGS.pidfile = '%s.pid' % name
logging.debug("Full set of FLAGS: \n\n\n" )
logging.debug("Full set of FLAGS: \n\n\n")
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None) ))
logging.debug("%s : %s", flag, FLAGS.get(flag, None))
action = 'start'
if len(argv) > 1:
@@ -102,7 +98,11 @@ def serve(name, main):
else:
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
daemonize(argv, name, main)
def daemonize(args, name, main):
"""Does the work of daemonizing the process"""
logging.getLogger('amqplib').setLevel(logging.WARN)
if FLAGS.daemonize:
logger = logging.getLogger()
@@ -115,7 +115,7 @@ def serve(name, main):
else:
if not FLAGS.logfile:
FLAGS.logfile = '%s.log' % name
logfile = logging.handlers.FileHandler(FLAGS.logfile)
logfile = logging.FileHandler(FLAGS.logfile)
logfile.setFormatter(formatter)
logger.addHandler(logfile)
stdin, stdout, stderr = None, None, None
@@ -137,4 +137,4 @@ def serve(name, main):
stdout=stdout,
stderr=stderr
):
main(argv)
main(args)
+53 -48
View File
@@ -22,15 +22,14 @@ Allows overriding of flags for use of fakes,
and some black magic for inline callbacks.
"""
import logging
import sys
import time
import mox
import stubout
import time
import unittest
from tornado import ioloop
from twisted.internet import defer
from twisted.python import failure
from twisted.trial import unittest as trial_unittest
from twisted.trial import unittest
from nova import fakerabbit
from nova import flags
@@ -41,20 +40,21 @@ flags.DEFINE_bool('fake_tests', True,
'should we use everything for testing')
def skip_if_fake(f):
def skip_if_fake(func):
"""Decorator that skips a test if running in fake mode"""
def _skipper(*args, **kw):
"""Wrapped skipper function"""
if FLAGS.fake_tests:
raise trial_unittest.SkipTest('Test cannot be run in fake mode')
raise unittest.SkipTest('Test cannot be run in fake mode')
else:
return f(*args, **kw)
_skipper.func_name = f.func_name
return func(*args, **kw)
return _skipper
class TrialTestCase(trial_unittest.TestCase):
def setUp(self):
class TrialTestCase(unittest.TestCase):
"""Test case base class for all unit tests"""
def setUp(self): # pylint: disable-msg=C0103
"""Run before each test method to initialize test environment"""
super(TrialTestCase, self).setUp()
# emulate some of the mox stuff, we can't use the metaclass
@@ -63,7 +63,8 @@ class TrialTestCase(trial_unittest.TestCase):
self.stubs = stubout.StubOutForTesting()
self.flag_overrides = {}
def tearDown(self):
def tearDown(self): # pylint: disable-msg=C0103
"""Runs after each test method to finalize/tear down test environment"""
super(TrialTestCase, self).tearDown()
self.reset_flags()
self.mox.UnsetStubs()
@@ -75,6 +76,7 @@ class TrialTestCase(trial_unittest.TestCase):
fakerabbit.reset_all()
def flags(self, **kw):
"""Override flag variables for a test"""
for k, v in kw.iteritems():
if k in self.flag_overrides:
self.reset_flags()
@@ -84,13 +86,16 @@ class TrialTestCase(trial_unittest.TestCase):
setattr(FLAGS, k, v)
def reset_flags(self):
"""Resets all flag variables for the test. Runs after each test"""
for k, v in self.flag_overrides.iteritems():
setattr(FLAGS, k, v)
class BaseTestCase(TrialTestCase):
def setUp(self):
# TODO(jaypipes): Can this be moved into the TrialTestCase class?
"""Base test case class for all unit tests."""
def setUp(self): # pylint: disable-msg=C0103
"""Run before each test method to initialize test environment"""
super(BaseTestCase, self).setUp()
# TODO(termie): we could possibly keep a more global registry of
# the injected listeners... this is fine for now though
@@ -98,33 +103,27 @@ class BaseTestCase(TrialTestCase):
self.ioloop = ioloop.IOLoop.instance()
self._waiting = None
self._doneWaiting = False
self._timedOut = False
self.set_up()
self._done_waiting = False
self._timed_out = False
def set_up(self):
pass
def tear_down(self):
pass
def tearDown(self):
def tearDown(self):# pylint: disable-msg=C0103
"""Runs after each test method to finalize/tear down test environment"""
super(BaseTestCase, self).tearDown()
for x in self.injected:
x.stop()
if FLAGS.fake_rabbit:
fakerabbit.reset_all()
self.tear_down()
def _waitForTest(self, timeout=60):
def _wait_for_test(self, timeout=60):
""" Push the ioloop along to wait for our test to complete. """
self._waiting = self.ioloop.add_timeout(time.time() + timeout,
self._timeout)
def _wait():
if self._timedOut:
"""Wrapped wait function. Called on timeout."""
if self._timed_out:
self.fail('test timed out')
self._done()
if self._doneWaiting:
if self._done_waiting:
self.ioloop.stop()
return
# we can use add_callback here but this uses less cpu when testing
@@ -134,15 +133,18 @@ class BaseTestCase(TrialTestCase):
self.ioloop.start()
def _done(self):
"""Callback used for cleaning up deferred test methods."""
if self._waiting:
try:
self.ioloop.remove_timeout(self._waiting)
except Exception:
except Exception: # pylint: disable-msg=W0703
# TODO(jaypipes): This produces a pylint warning. Should
# we really be catching Exception and then passing here?
pass
self._waiting = None
self._doneWaiting = True
self._done_waiting = True
def _maybeInlineCallbacks(self, f):
def _maybe_inline_callbacks(self, func):
""" If we're doing async calls in our tests, wait on them.
This is probably the most complicated hunk of code we have so far.
@@ -165,7 +167,7 @@ class BaseTestCase(TrialTestCase):
d.addCallback(_describe)
d.addCallback(_checkDescribe)
d.addCallback(lambda x: self._done())
self._waitForTest()
self._wait_for_test()
Example (inline callbacks! yay!):
@@ -179,16 +181,17 @@ class BaseTestCase(TrialTestCase):
# TODO(termie): this can be a wrapper function instead and
# and we can make a metaclass so that we don't
# have to copy all that "run" code below.
g = f()
g = func()
if not hasattr(g, 'send'):
self._done()
return defer.succeed(g)
inlined = defer.inlineCallbacks(f)
inlined = defer.inlineCallbacks(func)
d = inlined()
return d
def _catchExceptions(self, result, failure):
def _catch_exceptions(self, result, failure):
"""Catches all exceptions and handles keyboard interrupts."""
exc = (failure.type, failure.value, failure.getTracebackObject())
if isinstance(failure.value, self.failureException):
result.addFailure(self, exc)
@@ -200,44 +203,46 @@ class BaseTestCase(TrialTestCase):
self._done()
def _timeout(self):
"""Helper method which trips the timeouts"""
self._waiting = False
self._timedOut = True
self._timed_out = True
def run(self, result=None):
if result is None: result = self.defaultTestResult()
"""Runs the test case"""
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
test_method = getattr(self, self._testMethodName)
try:
try:
self.setUp()
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
result.addError(self, sys.exc_info())
return
ok = False
try:
d = self._maybeInlineCallbacks(testMethod)
d.addErrback(lambda x: self._catchExceptions(result, x))
d = self._maybe_inline_callbacks(test_method)
d.addErrback(lambda x: self._catch_exceptions(result, x))
d.addBoth(lambda x: self._done() and x)
self._waitForTest()
self._wait_for_test()
ok = True
except self.failureException:
result.addFailure(self, self._exc_info())
result.addFailure(self, sys.exc_info())
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
result.addError(self, sys.exc_info())
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
result.addError(self, sys.exc_info())
ok = False
if ok: result.addSuccess(self)
if ok:
result.addSuccess(self)
finally:
result.stopTest(self)
+15 -1
View File
@@ -179,7 +179,21 @@ class AuthTestCase(test.BaseTestCase):
project.add_role('test1', 'sysadmin')
self.assertTrue(project.has_role('test1', 'sysadmin'))
def test_211_can_remove_project_role(self):
def test_211_can_list_project_roles(self):
project = self.manager.get_project('testproj')
user = self.manager.get_user('test1')
self.manager.add_role(user, 'netadmin', project)
roles = self.manager.get_user_roles(user)
self.assertTrue('sysadmin' in roles)
self.assertFalse('netadmin' in roles)
project_roles = self.manager.get_user_roles(user, project)
self.assertTrue('sysadmin' in project_roles)
self.assertTrue('netadmin' in project_roles)
# has role should be false because global role is missing
self.assertFalse(self.manager.has_role(user, 'netadmin', project))
def test_212_can_remove_project_role(self):
project = self.manager.get_project('testproj')
self.assertTrue(project.has_role('test1', 'sysadmin'))
project.remove_role('test1', 'sysadmin')
+1 -1
View File
@@ -132,7 +132,7 @@ class CloudTestCase(test.BaseTestCase):
'state': 0x01,
'user_data': ''
}
rv = self.cloud._format_instances(self.context)
rv = self.cloud._format_describe_instances(self.context)
self.assert_(len(rv['reservationSet']) == 0)
# simulate launch of 5 instances
@@ -16,36 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import cloudservers
from nova import flags
class IdFake:
def __init__(self, id):
self.id = id
FLAGS = flags.FLAGS
# to get your access key:
# from nova.auth import users
# users.UserManger.instance().get_users()[0].access
rscloud = cloudservers.CloudServers(
'admin',
'6cca875e-5ab3-4c60-9852-abf5c5c60cc6'
)
rscloud.client.AUTH_URL = 'http://localhost:8773/v1.0'
rv = rscloud.servers.list()
print "SERVERS: %s" % rv
if len(rv) == 0:
server = rscloud.servers.create(
"test-server",
IdFake("ami-tiny"),
IdFake("m1.tiny")
)
print "LAUNCH: %s" % server
else:
server = rv[0]
print "Server to kill: %s" % server
raw_input("press enter key to kill the server")
server.delete()
flags.DEFINE_integer('answer', 42, 'test flag')
+87
View File
@@ -0,0 +1,87 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import flags
from nova import test
class FlagsTestCase(test.TrialTestCase):
def setUp(self):
super(FlagsTestCase, self).setUp()
self.FLAGS = flags.FlagValues()
self.global_FLAGS = flags.FLAGS
def test_define(self):
self.assert_('string' not in self.FLAGS)
self.assert_('int' not in self.FLAGS)
self.assert_('false' not in self.FLAGS)
self.assert_('true' not in self.FLAGS)
flags.DEFINE_string('string', 'default', 'desc', flag_values=self.FLAGS)
flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS)
flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS)
flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS)
self.assert_(self.FLAGS['string'])
self.assert_(self.FLAGS['int'])
self.assert_(self.FLAGS['false'])
self.assert_(self.FLAGS['true'])
self.assertEqual(self.FLAGS.string, 'default')
self.assertEqual(self.FLAGS.int, 1)
self.assertEqual(self.FLAGS.false, False)
self.assertEqual(self.FLAGS.true, True)
argv = ['flags_test',
'--string', 'foo',
'--int', '2',
'--false',
'--notrue']
self.FLAGS(argv)
self.assertEqual(self.FLAGS.string, 'foo')
self.assertEqual(self.FLAGS.int, 2)
self.assertEqual(self.FLAGS.false, True)
self.assertEqual(self.FLAGS.true, False)
def test_declare(self):
self.assert_('answer' not in self.global_FLAGS)
flags.DECLARE('answer', 'nova.tests.declare_flags')
self.assert_('answer' in self.global_FLAGS)
self.assertEqual(self.global_FLAGS.answer, 42)
# Make sure we don't overwrite anything
self.global_FLAGS.answer = 256
self.assertEqual(self.global_FLAGS.answer, 256)
flags.DECLARE('answer', 'nova.tests.declare_flags')
self.assertEqual(self.global_FLAGS.answer, 256)
def test_runtime_and_unknown_flags(self):
self.assert_('runtime_answer' not in self.global_FLAGS)
argv = ['flags_test', '--runtime_answer=60', 'extra_arg']
args = self.global_FLAGS(argv)
self.assertEqual(len(args), 2)
self.assertEqual(args[1], 'extra_arg')
self.assert_('runtime_answer' not in self.global_FLAGS)
import nova.tests.runtime_flags
self.assert_('runtime_answer' in self.global_FLAGS)
self.assertEqual(self.global_FLAGS.runtime_answer, 60)
+133 -101
View File
@@ -15,7 +15,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for network code
"""
import IPy
import os
import logging
@@ -31,8 +33,10 @@ from nova.network.exception import NoMoreAddresses
FLAGS = flags.FLAGS
class NetworkTestCase(test.TrialTestCase):
def setUp(self):
"""Test cases for network code"""
def setUp(self): # pylint: disable-msg=C0103
super(NetworkTestCase, self).setUp()
# NOTE(vish): if you change these flags, make sure to change the
# flags in the corresponding section in nova-dhcpbridge
@@ -43,7 +47,6 @@ class NetworkTestCase(test.TrialTestCase):
network_size=32)
logging.getLogger().setLevel(logging.DEBUG)
self.manager = manager.AuthManager()
self.dnsmasq = FakeDNSMasq()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
self.projects.append(self.manager.create_project('netuser',
@@ -54,47 +57,49 @@ class NetworkTestCase(test.TrialTestCase):
self.projects.append(self.manager.create_project(name,
'netuser',
name))
self.network = model.PublicNetworkController()
vpn.NetworkData.create(self.projects[i].id)
self.service = service.VlanNetworkService()
def tearDown(self):
def tearDown(self): # pylint: disable-msg=C0103
super(NetworkTestCase, self).tearDown()
for project in self.projects:
self.manager.delete_project(project)
self.manager.delete_user(self.user)
def test_public_network_allocation(self):
"""Makes sure that we can allocaate a public ip"""
pubnet = IPy.IP(flags.FLAGS.public_range)
address = self.network.allocate_ip(self.user.id, self.projects[0].id, "public")
address = self.service.allocate_elastic_ip(self.user.id,
self.projects[0].id)
self.assertTrue(IPy.IP(address) in pubnet)
self.assertTrue(IPy.IP(address) in self.network.network)
def test_allocate_deallocate_fixed_ip(self):
result = yield self.service.allocate_fixed_ip(
"""Makes sure that we can allocate and deallocate a fixed ip"""
result = self.service.allocate_fixed_ip(
self.user.id, self.projects[0].id)
address = result['private_dns_name']
mac = result['mac_address']
logging.debug("Was allocated %s" % (address))
net = model.get_project_network(self.projects[0].id, "default")
self.assertEqual(True, is_in_project(address, self.projects[0].id))
hostname = "test-host"
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
rv = self.service.deallocate_fixed_ip(address)
issue_ip(mac, address, hostname, net.bridge_name)
self.service.deallocate_fixed_ip(address)
# Doesn't go away until it's dhcp released
self.assertEqual(True, is_in_project(address, self.projects[0].id))
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
release_ip(mac, address, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address, self.projects[0].id))
def test_range_allocation(self):
hostname = "test-host"
result = yield self.service.allocate_fixed_ip(
self.user.id, self.projects[0].id)
def test_side_effects(self):
"""Ensures allocating and releasing has no side effects"""
hostname = "side-effect-host"
result = self.service.allocate_fixed_ip(self.user.id,
self.projects[0].id)
mac = result['mac_address']
address = result['private_dns_name']
result = yield self.service.allocate_fixed_ip(
self.user, self.projects[1].id)
result = self.service.allocate_fixed_ip(self.user,
self.projects[1].id)
secondmac = result['mac_address']
secondaddress = result['private_dns_name']
@@ -102,66 +107,75 @@ class NetworkTestCase(test.TrialTestCase):
secondnet = model.get_project_network(self.projects[1].id, "default")
self.assertEqual(True, is_in_project(address, self.projects[0].id))
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
self.assertEqual(True, is_in_project(secondaddress,
self.projects[1].id))
self.assertEqual(False, is_in_project(address, self.projects[1].id))
# Addresses are allocated before they're issued
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
self.dnsmasq.issue_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
issue_ip(mac, address, hostname, net.bridge_name)
issue_ip(secondmac, secondaddress, hostname, secondnet.bridge_name)
rv = self.service.deallocate_fixed_ip(address)
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.service.deallocate_fixed_ip(address)
release_ip(mac, address, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address, self.projects[0].id))
# First address release shouldn't affect the second
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
self.assertEqual(True, is_in_project(secondaddress,
self.projects[1].id))
rv = self.service.deallocate_fixed_ip(secondaddress)
self.dnsmasq.release_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id))
self.service.deallocate_fixed_ip(secondaddress)
release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name)
self.assertEqual(False, is_in_project(secondaddress,
self.projects[1].id))
def test_subnet_edge(self):
result = yield self.service.allocate_fixed_ip(self.user.id,
"""Makes sure that private ips don't overlap"""
result = self.service.allocate_fixed_ip(self.user.id,
self.projects[0].id)
firstaddress = result['private_dns_name']
hostname = "toomany-hosts"
for i in range(1,5):
for i in range(1, 5):
project_id = self.projects[i].id
result = yield self.service.allocate_fixed_ip(
result = self.service.allocate_fixed_ip(
self.user, project_id)
mac = result['mac_address']
address = result['private_dns_name']
result = yield self.service.allocate_fixed_ip(
result = self.service.allocate_fixed_ip(
self.user, project_id)
mac2 = result['mac_address']
address2 = result['private_dns_name']
result = yield self.service.allocate_fixed_ip(
result = self.service.allocate_fixed_ip(
self.user, project_id)
mac3 = result['mac_address']
address3 = result['private_dns_name']
self.assertEqual(False, is_in_project(address, self.projects[0].id))
self.assertEqual(False, is_in_project(address2, self.projects[0].id))
self.assertEqual(False, is_in_project(address3, self.projects[0].id))
rv = self.service.deallocate_fixed_ip(address)
rv = self.service.deallocate_fixed_ip(address2)
rv = self.service.deallocate_fixed_ip(address3)
net = model.get_project_network(project_id, "default")
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
issue_ip(mac, address, hostname, net.bridge_name)
issue_ip(mac2, address2, hostname, net.bridge_name)
issue_ip(mac3, address3, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address,
self.projects[0].id))
self.assertEqual(False, is_in_project(address2,
self.projects[0].id))
self.assertEqual(False, is_in_project(address3,
self.projects[0].id))
self.service.deallocate_fixed_ip(address)
self.service.deallocate_fixed_ip(address2)
self.service.deallocate_fixed_ip(address3)
release_ip(mac, address, hostname, net.bridge_name)
release_ip(mac2, address2, hostname, net.bridge_name)
release_ip(mac3, address3, hostname, net.bridge_name)
net = model.get_project_network(self.projects[0].id, "default")
rv = self.service.deallocate_fixed_ip(firstaddress)
self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name)
self.service.deallocate_fixed_ip(firstaddress)
release_ip(mac, firstaddress, hostname, net.bridge_name)
def test_212_vpn_ip_and_port_looks_valid(self):
vpn.NetworkData.create(self.projects[0].id)
def test_vpn_ip_and_port_looks_valid(self):
"""Ensure the vpn ip and port are reasonable"""
self.assert_(self.projects[0].vpn_ip)
self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port)
self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port)
def test_too_many_vpns(self):
"""Ensure error is raised if we run out of vpn ports"""
vpns = []
for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
vpns.append(vpn.NetworkData.create("vpnuser%s" % i))
@@ -169,84 +183,102 @@ class NetworkTestCase(test.TrialTestCase):
for network_datum in vpns:
network_datum.destroy()
def test_release_before_deallocate(self):
pass
def test_ips_are_reused(self):
"""Makes sure that ip addresses that are deallocated get reused"""
result = self.service.allocate_fixed_ip(
self.user.id, self.projects[0].id)
mac = result['mac_address']
address = result['private_dns_name']
def test_deallocate_before_issued(self):
pass
hostname = "reuse-host"
net = model.get_project_network(self.projects[0].id, "default")
def test_too_many_addresses(self):
"""
Here, we test that a proper NoMoreAddresses exception is raised.
issue_ip(mac, address, hostname, net.bridge_name)
self.service.deallocate_fixed_ip(address)
release_ip(mac, address, hostname, net.bridge_name)
However, the number of available IP addresses depends on the test
result = self.service.allocate_fixed_ip(
self.user, self.projects[0].id)
secondmac = result['mac_address']
secondaddress = result['private_dns_name']
self.assertEqual(address, secondaddress)
issue_ip(secondmac, secondaddress, hostname, net.bridge_name)
self.service.deallocate_fixed_ip(secondaddress)
release_ip(secondmac, secondaddress, hostname, net.bridge_name)
def test_available_ips(self):
"""Make sure the number of available ips for the network is correct
The number of available IP addresses depends on the test
environment's setup.
Network size is set in test fixture's setUp method.
There are FLAGS.cnt_vpn_clients addresses reserved for VPN (NUM_RESERVED_VPN_IPS)
And there are NUM_STATIC_IPS that are always reserved by Nova for the necessary
services (gateway, CloudPipe, etc)
So we should get flags.network_size - (NUM_STATIC_IPS +
NUM_PREALLOCATED_IPS +
NUM_RESERVED_VPN_IPS)
usable addresses
There are ips reserved at the bottom and top of the range.
services (network, gateway, CloudPipe, broadcast)
"""
net = model.get_project_network(self.projects[0].id, "default")
num_preallocated_ips = len(net.assigned)
net_size = flags.FLAGS.network_size
num_available_ips = net_size - (net.num_bottom_reserved_ips +
num_preallocated_ips +
net.num_top_reserved_ips)
self.assertEqual(num_available_ips, len(list(net.available)))
# Determine expected number of available IP addresses
num_static_ips = net.num_static_ips
num_preallocated_ips = len(net.hosts.keys())
num_reserved_vpn_ips = flags.FLAGS.cnt_vpn_clients
num_available_ips = flags.FLAGS.network_size - (num_static_ips +
num_preallocated_ips +
num_reserved_vpn_ips)
def test_too_many_addresses(self):
"""Test for a NoMoreAddresses exception when all fixed ips are used.
"""
net = model.get_project_network(self.projects[0].id, "default")
hostname = "toomany-hosts"
macs = {}
addresses = {}
for i in range(0, (num_available_ips - 1)):
result = yield self.service.allocate_fixed_ip(self.user.id, self.projects[0].id)
# Number of availaible ips is len of the available list
num_available_ips = len(list(net.available))
for i in range(num_available_ips):
result = self.service.allocate_fixed_ip(self.user.id,
self.projects[0].id)
macs[i] = result['mac_address']
addresses[i] = result['private_dns_name']
self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
self.assertFailure(self.service.allocate_fixed_ip(self.user.id, self.projects[0].id), NoMoreAddresses)
self.assertEqual(len(list(net.available)), 0)
self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip,
self.user.id, self.projects[0].id)
for i in range(len(addresses)):
self.service.deallocate_fixed_ip(addresses[i])
release_ip(macs[i], addresses[i], hostname, net.bridge_name)
self.assertEqual(len(list(net.available)), num_available_ips)
for i in range(0, (num_available_ips - 1)):
rv = self.service.deallocate_fixed_ip(addresses[i])
self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name)
def is_in_project(address, project_id):
return address in model.get_project_network(project_id).list_addresses()
"""Returns true if address is in specified project"""
return address in model.get_project_network(project_id).assigned
def _get_project_addresses(project_id):
project_addresses = []
for addr in model.get_project_network(project_id).list_addresses():
project_addresses.append(addr)
return project_addresses
def binpath(script):
"""Returns the absolute path to a script in bin"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
class FakeDNSMasq(object):
def issue_ip(self, mac, ip, hostname, interface):
cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'),
mac, ip, hostname)
env = {'DNSMASQ_INTERFACE': interface,
'TESTING' : '1',
'FLAGFILE' : FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
logging.debug("ISSUE_IP: %s, %s " % (out, err))
def release_ip(self, mac, ip, hostname, interface):
cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'),
mac, ip, hostname)
env = {'DNSMASQ_INTERFACE': interface,
'TESTING' : '1',
'FLAGFILE' : FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
logging.debug("RELEASE_IP: %s, %s " % (out, err))
def issue_ip(mac, private_ip, hostname, interface):
"""Run add command on dhcpbridge"""
cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'),
mac, private_ip, hostname)
env = {'DNSMASQ_INTERFACE': interface,
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
logging.debug("ISSUE_IP: %s, %s ", out, err)
def release_ip(mac, private_ip, hostname, interface):
"""Run del command on dhcpbridge"""
cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'),
mac, private_ip, hostname)
env = {'DNSMASQ_INTERFACE': interface,
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
logging.debug("RELEASE_IP: %s, %s ", out, err)
+85
View File
@@ -0,0 +1,85 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using queue
"""
import logging
from twisted.internet import defer
from nova import flags
from nova import rpc
from nova import test
FLAGS = flags.FLAGS
class RpcTestCase(test.BaseTestCase):
    """Test cases for rpc.

    Wires a real AdapterConsumer to a TestReceiver proxy and exercises
    round-trip calls, including remote-exception propagation.
    """
    def setUp(self):  # pylint: disable-msg=C0103
        # Build a consumer on topic 'test' backed by TestReceiver, and hook
        # it into the tornado ioloop; the attach handle goes on
        # self.injected so the base class tears it down after each test.
        super(RpcTestCase, self).setUp()
        self.conn = rpc.Connection.instance()
        self.receiver = TestReceiver()
        self.consumer = rpc.AdapterConsumer(connection=self.conn,
                                            topic='test',
                                            proxy=self.receiver)
        self.injected.append(self.consumer.attach_to_tornado(self.ioloop))

    # NOTE(review): these test methods use bare `yield`, so the base class
    # presumably runs them as Twisted deferred generators -- confirm against
    # test.BaseTestCase before relying on that.
    def test_call_succeed(self):
        """Get a value through rpc call"""
        value = 42
        result = yield rpc.call('test', {"method": "echo",
                                         "args": {"value": value}})
        self.assertEqual(value, result)

    def test_call_exception(self):
        """Test that exception gets passed back properly

        rpc.call returns a RemoteError object. The value of the
        exception is converted to a string, so we convert it back
        to an int in the test.
        """
        value = 42
        # First check declaratively that the deferred fails with RemoteError.
        self.assertFailure(rpc.call('test', {"method": "fail",
                                             "args": {"value": value}}),
                           rpc.RemoteError)
        # Then repeat imperatively so we can inspect the payload: the remote
        # exception value round-trips as a string, hence the int() cast.
        try:
            yield rpc.call('test', {"method": "fail",
                                    "args": {"value": value}})
            self.fail("should have thrown rpc.RemoteError")
        except rpc.RemoteError as exc:
            self.assertEqual(int(exc.value), value)
class TestReceiver(object):
    """Stateless proxy that gives the RPC consumer endpoints to invoke.

    Every method is a staticmethod because the receiver holds no per-call
    state of its own.
    """

    @staticmethod
    def echo(value):
        """Log the incoming value and hand it straight back as a deferred."""
        logging.debug("Received %s", value)
        result = defer.succeed(value)
        return result

    @staticmethod
    def fail(value):
        """Blow up on purpose, embedding the given value in the exception."""
        error = Exception(value)
        raise error
+23
View File
@@ -0,0 +1,23 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Helper module for the flags unit tests: it declares a flag as a side
# effect of being imported, so a test can import it at runtime and verify
# that flags defined after startup are still registered.
from nova import flags

FLAGS = flags.FLAGS
# NOTE(review): 54 looks like an arbitrary sentinel; the test presumably
# only checks that 'runtime_answer' becomes visible after import -- confirm.
flags.DEFINE_integer('runtime_answer', 54, 'test flag')
+69
View File
@@ -0,0 +1,69 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
    """Checks LibvirtConnection's URI/template selection and XML rendering."""
    def test_get_uri_and_template(self):
        """For each libvirt_type, verify the connection URI and the rendered
        domain XML; then verify an explicit FLAGS.libvirt_uri wins over the
        per-type default.
        """
        class MockDataModel(object):
            # Minimal stand-in for an Instance: toXml() only reads .datamodel.
            def __init__(self):
                self.datamodel = { 'name' : 'i-cafebabe',
                                   'memory_kb' : '1024000',
                                   'basepath' : '/some/path',
                                   'bridge_name' : 'br100',
                                   'mac_address' : '02:12:34:46:56:67',
                                   'vcpus' : 2 }

        # Maps libvirt_type -> (expected URI, predicates the XML must satisfy).
        type_uri_map = { 'qemu' : ('qemu:///system',
                             [lambda s: '<domain type=\'qemu\'>' in s,
                              lambda s: 'type>hvm</type' in s,
                              lambda s: 'emulator>/usr/bin/kvm' not in s]),
                         'kvm' : ('qemu:///system',
                             [lambda s: '<domain type=\'kvm\'>' in s,
                              lambda s: 'type>hvm</type' in s,
                              lambda s: 'emulator>/usr/bin/qemu<' not in s]),
                         'uml' : ('uml:///system',
                             [lambda s: '<domain type=\'uml\'>' in s,
                              lambda s: 'type>uml</type' in s]),
                        }
        for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri, template = conn.get_uri_and_template()
            self.assertEquals(uri, expected_uri)
            for i, check in enumerate(checks):
                xml = conn.toXml(MockDataModel())
                self.assertTrue(check(xml), '%s failed check %d' % (xml, i))

        # Deliberately not just assigning this string to FLAGS.libvirt_uri and
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = 'something completely different'
        FLAGS.libvirt_uri = testuri
        for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
            FLAGS.libvirt_type = libvirt_type
            conn = libvirt_conn.LibvirtConnection(True)
            uri, template = conn.get_uri_and_template()
            self.assertEquals(uri, testuri)
+59 -18
View File
@@ -17,6 +17,10 @@
# under the License.
import logging
import shutil
import tempfile
from twisted.internet import defer
from nova import compute
from nova import exception
@@ -34,10 +38,16 @@ class VolumeTestCase(test.TrialTestCase):
super(VolumeTestCase, self).setUp()
self.compute = compute.service.ComputeService()
self.volume = None
self.tempdir = tempfile.mkdtemp()
self.flags(connection_type='fake',
fake_storage=True)
fake_storage=True,
aoe_export_dir=self.tempdir)
self.volume = volume_service.VolumeService()
def tearDown(self):
shutil.rmtree(self.tempdir)
@defer.inlineCallbacks
def test_run_create_volume(self):
vol_size = '0'
user_id = 'fake'
@@ -48,34 +58,40 @@ class VolumeTestCase(test.TrialTestCase):
volume_service.get_volume(volume_id)['volume_id'])
rv = self.volume.delete_volume(volume_id)
self.assertFailure(volume_service.get_volume(volume_id),
exception.Error)
self.assertRaises(exception.Error, volume_service.get_volume, volume_id)
@defer.inlineCallbacks
def test_too_big_volume(self):
vol_size = '1001'
user_id = 'fake'
project_id = 'fake'
self.assertRaises(TypeError,
self.volume.create_volume,
vol_size, user_id, project_id)
try:
yield self.volume.create_volume(vol_size, user_id, project_id)
self.fail("Should have thrown TypeError")
except TypeError:
pass
@defer.inlineCallbacks
def test_too_many_volumes(self):
vol_size = '1'
user_id = 'fake'
project_id = 'fake'
num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
total_slots = FLAGS.slots_per_shelf * num_shelves
total_slots = FLAGS.blades_per_shelf * num_shelves
vols = []
from nova import datastore
redis = datastore.Redis.instance()
for i in xrange(total_slots):
vid = yield self.volume.create_volume(vol_size, user_id, project_id)
vols.append(vid)
self.assertFailure(self.volume.create_volume(vol_size,
user_id,
project_id),
volume_service.NoMoreVolumes)
volume_service.NoMoreBlades)
for id in vols:
yield self.volume.delete_volume(id)
@defer.inlineCallbacks
def test_run_attach_detach_volume(self):
# Create one volume and one compute to test with
instance_id = "storage-test"
@@ -84,22 +100,26 @@ class VolumeTestCase(test.TrialTestCase):
project_id = 'fake'
mountpoint = "/dev/sdf"
volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
volume_obj = volume_service.get_volume(volume_id)
volume_obj.start_attach(instance_id, mountpoint)
rv = yield self.compute.attach_volume(volume_id,
instance_id,
mountpoint)
if FLAGS.fake_tests:
volume_obj.finish_attach()
else:
rv = yield self.compute.attach_volume(instance_id,
volume_id,
mountpoint)
self.assertEqual(volume_obj['status'], "in-use")
self.assertEqual(volume_obj['attachStatus'], "attached")
self.assertEqual(volume_obj['attach_status'], "attached")
self.assertEqual(volume_obj['instance_id'], instance_id)
self.assertEqual(volume_obj['mountpoint'], mountpoint)
self.assertRaises(exception.Error,
self.volume.delete_volume,
volume_id)
rv = yield self.volume.detach_volume(volume_id)
self.assertFailure(self.volume.delete_volume(volume_id), exception.Error)
volume_obj.start_detach()
if FLAGS.fake_tests:
volume_obj.finish_detach()
else:
rv = yield self.volume.detach_volume(instance_id,
volume_id)
volume_obj = volume_service.get_volume(volume_id)
self.assertEqual(volume_obj['status'], "available")
@@ -108,6 +128,27 @@ class VolumeTestCase(test.TrialTestCase):
volume_service.get_volume,
volume_id)
@defer.inlineCallbacks
def test_multiple_volume_race_condition(self):
vol_size = "5"
user_id = "fake"
project_id = 'fake'
shelf_blades = []
def _check(volume_id):
vol = volume_service.get_volume(volume_id)
shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id'])
self.assert_(shelf_blade not in shelf_blades)
shelf_blades.append(shelf_blade)
logging.debug("got %s" % shelf_blade)
vol.destroy()
deferreds = []
for i in range(5):
d = self.volume.create_volume(vol_size, user_id, project_id)
d.addCallback(_check)
d.addErrback(self.fail)
deferreds.append(d)
yield defer.DeferredList(deferreds)
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
# each of them having a different FLAG for storage_node
+1 -9
View File
@@ -241,15 +241,7 @@ def serve(filename):
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
class NoNewlineFormatter(logging.Formatter):
"""Strips newlines from default formatter"""
def format(self, record):
"""Grabs default formatter's output and strips newlines"""
data = logging.Formatter.format(self, record)
return data.replace("\n", "--")
# NOTE(vish): syslog-ng doesn't handle newlines from tracebacks very well
formatter = NoNewlineFormatter(
formatter = logging.Formatter(
'(%(name)s): %(levelname)s %(message)s')
handler = logging.StreamHandler(log.StdioOnnaStick())
handler.setFormatter(formatter)
+17 -8
View File
@@ -20,7 +20,7 @@
System-level utilities and helper functions.
"""
from datetime import datetime, timedelta
import datetime
import inspect
import logging
import os
@@ -32,9 +32,11 @@ import sys
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
@@ -44,6 +46,7 @@ def import_class(import_str):
except (ImportError, ValueError, AttributeError):
raise exception.NotFound('Class %s cannot be found' % class_str)
def fetchfile(url, target):
logging.debug("Fetching %s" % url)
# c = pycurl.Curl()
@@ -126,16 +129,22 @@ def get_my_ip():
'''
if getattr(FLAGS, 'fake_tests', None):
return '127.0.0.1'
csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('www.google.com', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('www.google.com', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.gaierror as ex:
logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex)
return "127.0.0.1"
def isotime(at=None):
if not at:
at = datetime.utcnow()
at = datetime.datetime.utcnow()
return at.strftime(TIME_FORMAT)
def parse_isotime(timestr):
return datetime.strptime(timestr, TIME_FORMAT)
return datetime.datetime.strptime(timestr, TIME_FORMAT)
+1
View File
@@ -57,6 +57,7 @@ def rangetest(**argchecks): # validate ranges for both+defaults
return onCall
return onDecorator
def typetest(**argchecks):
def onDecorator(func):
import sys
+9
View File
@@ -27,6 +27,15 @@ FLAGS = flags.FLAGS
def get_connection(read_only=False):
"""Returns an object representing the connection to a virtualization
platform. This could be nova.virt.fake.FakeConnection in test mode,
a connection to KVM or QEMU via libvirt, or a connection to XenServer
or Xen Cloud Platform via XenAPI.
Any object returned here must conform to the interface documented by
FakeConnection.
"""
# TODO(termie): maybe lazy load after initial check for permissions
# TODO(termie): check whether we can be disconnected
t = FLAGS.connection_type
+142 -2
View File
@@ -19,10 +19,13 @@
"""
A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor.
This module also documents the semantics of real hypervisor connections.
"""
import logging
from twisted.internet import defer
from nova.compute import power_state
@@ -32,6 +35,38 @@ def get_connection(_):
class FakeConnection(object):
"""
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
(XenAPI terminology) or domain (Xen or libvirt terminology).
An instance has an ID, which is the identifier chosen by Nova to represent
the instance further up the stack. This is unfortunately also called a
'name' elsewhere. As far as this layer is concerned, 'instance ID' and
'instance name' are synonyms.
Note that the instance ID or name is not human-readable or
customer-controlled -- it's an internal ID chosen by Nova. At the
nova.virt layer, instances do not have human-readable names at all -- such
things are only known higher up the stack.
Most virtualization platforms will also have their own identity schemes,
to uniquely identify a VM or domain. These IDs must stay internal to the
platform-specific layer, and never escape the connection interface. The
platform-specific layer is responsible for keeping track of which instance
ID maps to which platform-specific ID, and vice versa.
In contrast, the list_disks and list_interfaces calls may return
platform-specific IDs. These identify a specific virtual disk or specific
virtual network interface, and these IDs are opaque to the rest of Nova.
Some methods here take an instance of nova.compute.service.Instance. This
is the datastructure used by nova.compute to store details regarding an
instance, and pass them into this layer. This layer is responsible for
translating that generic datastructure into terms that are specific to the
virtualization platform.
"""
def __init__(self):
self.instances = {}
@@ -42,20 +77,70 @@ class FakeConnection(object):
return cls._instance
def list_instances(self):
"""
Return the names of all the instances known to the virtualization
layer, as a list.
"""
return self.instances.keys()
    def spawn(self, instance):
        """
        Create a new instance/VM/domain on the virtualization platform.

        The given parameter is an instance of nova.compute.service.Instance.
        This function should use the data there to guide the creation of
        the new instance.

        The work will be done asynchronously. This function returns a
        Deferred that allows the caller to detect when it is complete.

        Once this successfully completes, the instance should be
        running (power_state.RUNNING).

        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.
        """
        # The fake "boots" instantly: register the instance under its name,
        # mark it RUNNING, and report success via an already-fired Deferred.
        fake_instance = FakeInstance()
        self.instances[instance.name] = fake_instance
        fake_instance._state = power_state.RUNNING
        return defer.succeed(None)
def reboot(self, instance):
pass
"""
Reboot the specified instance.
The given parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name.
The work will be done asynchronously. This function returns a
Deferred that allows the caller to detect when it is complete.
"""
return defer.succeed(None)
    def destroy(self, instance):
        """
        Destroy (shutdown and delete) the specified instance.

        The given parameter is an instance of nova.compute.service.Instance,
        and so the instance is being specified as instance.name.

        The work will be done asynchronously. This function returns a
        Deferred that allows the caller to detect when it is complete.
        """
        # Raises KeyError if the instance was never spawned on this fake.
        del self.instances[instance.name]
        return defer.succeed(None)
def get_info(self, instance_id):
"""
Get a block of information about the given instance. This is returned
as a dictionary containing 'state': The power_state of the instance,
'max_mem': The maximum memory for the instance, in KiB, 'mem': The
current memory the instance has, in KiB, 'num_cpu': The current number
of virtual CPUs the instance has, 'cpu_time': The total CPU time used
by the instance, in nanoseconds.
"""
i = self.instances[instance_id]
return {'state': i._state,
'max_mem': 0,
@@ -64,15 +149,70 @@ class FakeConnection(object):
'cpu_time': 0}
    def list_disks(self, instance_id):
        """
        Return the IDs of all the virtual disks attached to the specified
        instance, as a list. These IDs are opaque to the caller (they are
        only useful for giving back to this layer as a parameter to
        disk_stats). These IDs only need to be unique for a given instance.

        Note that this function takes an instance ID, not a
        compute.service.Instance, so that it can be called by compute.monitor.
        """
        # The fake always reports exactly one placeholder disk.
        return ['A_DISK']
    def list_interfaces(self, instance_id):
        """
        Return the IDs of all the virtual network interfaces attached to the
        specified instance, as a list. These IDs are opaque to the caller
        (they are only useful for giving back to this layer as a parameter to
        interface_stats). These IDs only need to be unique for a given
        instance.

        Note that this function takes an instance ID, not a
        compute.service.Instance, so that it can be called by compute.monitor.
        """
        # The fake always reports exactly one placeholder interface.
        return ['A_VIF']
def block_stats(self, instance_id, disk_id):
"""
Return performance counters associated with the given disk_id on the
given instance_id. These are returned as [rd_req, rd_bytes, wr_req,
wr_bytes, errs], where rd indicates read, wr indicates write, req is
the total number of I/O requests made, bytes is the total number of
bytes transferred, and errs is the number of requests held up due to a
full pipeline.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance ID, not a
compute.service.Instance, so that it can be called by compute.monitor.
"""
return [0L, 0L, 0L, 0L, null]
    def interface_stats(self, instance_id, iface_id):
        """
        Return performance counters associated with the given iface_id on the
        given instance_id. These are returned as [rx_bytes, rx_packets,
        rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
        indicates receive, tx indicates transmit, bytes and packets indicate
        the total number of bytes or packets transferred, and errs and dropped
        is the total number of packets failed / dropped.

        All counters are long integers.

        This method is optional. On some platforms (e.g. XenAPI) performance
        statistics can be retrieved directly in aggregate form, without Nova
        having to do the aggregation. On those platforms, this method is
        unused.

        Note that this function takes an instance ID, not a
        compute.service.Instance, so that it can be called by compute.monitor.
        """
        # Fake counters: eight zero longs (rx/tx bytes, packets, errs, drops).
        return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
+13 -7
View File
@@ -23,14 +23,15 @@ Handling of VM disk images.
import os.path
import time
import urlparse
from nova import flags
from nova import process
from nova.auth import signer
from nova.auth import manager
from nova.auth import signer
FLAGS = flags.FLAGS
flags.DEFINE_bool('use_s3', True,
'whether to get images from s3 or use local copy')
@@ -42,8 +43,9 @@ def fetch(image, path, user, project):
f = _fetch_local_image
return f(image, path, user, project)
def _fetch_s3_image(image, path, user, project):
url = _image_url('%s/image' % image)
url = image_url(image)
# This should probably move somewhere else, like e.g. a download_as
# method on User objects and at the same time get rewritten to use
@@ -51,11 +53,11 @@ def _fetch_s3_image(image, path, user, project):
headers = {}
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
uri = '/' + url.partition('/')[2]
(_, _, url_path, _, _, _) = urlparse.urlparse(url)
access = manager.AuthManager().get_access_key(user, project)
signature = signer.Signer(user.secret.encode()).s3_authorization(headers,
'GET',
uri)
url_path)
headers['Authorization'] = 'AWS %s:%s' % (access, signature)
cmd = ['/usr/bin/curl', '--fail', '--silent', url]
@@ -65,12 +67,16 @@ def _fetch_s3_image(image, path, user, project):
cmd += ['-o', path]
return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])
def _fetch_local_image(image, path, user, project):
source = _image_path('%s/image' % image)
return process.simple_execute('cp %s %s' % (source, path))
def _image_path(path):
return os.path.join(FLAGS.images_path, path)
def _image_url(path):
return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path)
def image_url(image):
return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
image)
+25
View File
@@ -0,0 +1,25 @@
<!-- Libvirt domain definition template for user-mode-linux guests.
     The parenthesised placeholders are substituted via Python string
     formatting from the instance datamodel before the XML is handed to
     libvirt. Comments here must never contain a percent sign, or the
     substitution step would fail. -->
<domain type='%(type)s'>
    <name>%(name)s</name>
    <memory>%(memory_kb)s</memory>
    <os>
        <type>%(type)s</type>
        <kernel>/usr/bin/linux</kernel>
        <root>/dev/ubda1</root>
    </os>
    <devices>
        <disk type='file'>
            <source file='%(basepath)s/disk'/>
            <target dev='ubd0' bus='uml'/>
        </disk>
        <interface type='bridge'>
            <source bridge='%(bridge_name)s'/>
            <mac address='%(mac_address)s'/>
        </interface>
        <!-- Console log is captured to a file alongside the disk image. -->
        <console type="pty" />
        <serial type="file">
            <source path='%(basepath)s/console.log'/>
            <target port='1'/>
        </serial>
    </devices>
    <nova>%(nova)s</nova>
</domain>
+42 -44
View File
@@ -42,17 +42,25 @@ from nova.virt import images
libvirt = None
libxml2 = None
FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('compute/libvirt.xml.template'),
'Libvirt XML Template')
utils.abspath('virt/libvirt.qemu.xml.template'),
'Libvirt XML Template for QEmu/KVM')
flags.DEFINE_string('libvirt_uml_xml_template',
utils.abspath('virt/libvirt.uml.xml.template'),
'Libvirt XML Template for user-mode-linux')
flags.DEFINE_string('injected_network_template',
utils.abspath('compute/interfaces.template'),
utils.abspath('virt/interfaces.template'),
'Template file for injected network')
flags.DEFINE_string('libvirt_type',
'kvm',
'Libvirt domain type (kvm, qemu, etc)')
'Libvirt domain type (valid options are: kvm, qemu, uml)')
flags.DEFINE_string('libvirt_uri',
'',
'Override the default libvirt URI (which is dependent'
' on libvirt_type)')
def get_connection(read_only):
# These are loaded late so that there's no need to install these
@@ -68,20 +76,41 @@ def get_connection(read_only):
class LibvirtConnection(object):
def __init__(self, read_only):
self.libvirt_uri, template_file = self.get_uri_and_template()
self.libvirt_xml = open(template_file).read()
self._wrapped_conn = None
self.read_only = read_only
@property
def _conn(self):
if not self._wrapped_conn:
self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only)
return self._wrapped_conn
def get_uri_and_template(self):
if FLAGS.libvirt_type == 'uml':
uri = FLAGS.libvirt_uri or 'uml:///system'
template_file = FLAGS.libvirt_uml_xml_template
else:
uri = FLAGS.libvirt_uri or 'qemu:///system'
template_file = FLAGS.libvirt_xml_template
return uri, template_file
def _connect(self, uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
'root',
None]
if read_only:
self._conn = libvirt.openReadOnly('qemu:///system')
else:
self._conn = libvirt.openAuth('qemu:///system', auth, 0)
if read_only:
return libvirt.openReadOnly(uri)
else:
return libvirt.openAuth(uri, auth, 0)
def list_instances(self):
return [self._conn.lookupByID(x).name()
for x in self._conn.listDomainsID()]
def destroy(self, instance):
try:
virt_dom = self._conn.lookupByName(instance.name)
@@ -110,12 +139,11 @@ class LibvirtConnection(object):
timer.start(interval=0.5, now=True)
return d
def _cleanup(self, instance):
target = os.path.abspath(instance.datamodel['basepath'])
logging.info("Deleting instance files at %s", target)
shutil.rmtree(target)
if os.path.exists(target):
shutil.rmtree(target)
@defer.inlineCallbacks
@exception.wrap_exception
@@ -142,7 +170,6 @@ class LibvirtConnection(object):
timer.start(interval=0.5, now=True)
yield d
@defer.inlineCallbacks
@exception.wrap_exception
def spawn(self, instance):
@@ -173,7 +200,6 @@ class LibvirtConnection(object):
timer.start(interval=0.5, now=True)
yield local_d
@defer.inlineCallbacks
def _create_image(self, instance, libvirt_xml):
# syntactic nicety
@@ -228,27 +254,23 @@ class LibvirtConnection(object):
yield disk.partition(
basepath('disk-raw'), basepath('disk'), bytes, execute=execute)
def basepath(self, instance, path=''):
return os.path.abspath(os.path.join(instance.datamodel['basepath'], path))
def toXml(self, instance):
# TODO(termie): cache?
logging.debug("Starting the toXML method")
libvirt_xml = open(FLAGS.libvirt_xml_template).read()
xml_info = instance.datamodel.copy()
# TODO(joshua): Make this xml express the attached disks as well
# TODO(termie): lazy lazy hack because xml is annoying
xml_info['nova'] = json.dumps(instance.datamodel.copy())
xml_info['type'] = FLAGS.libvirt_type
libvirt_xml = libvirt_xml % xml_info
libvirt_xml = self.libvirt_xml % xml_info
logging.debug("Finished the toXML method")
return libvirt_xml
def get_info(self, instance_id):
virt_dom = self._conn.lookupByName(instance_id)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
@@ -258,14 +280,7 @@ class LibvirtConnection(object):
'num_cpu': num_cpu,
'cpu_time': cpu_time}
def get_disks(self, instance_id):
"""
Note that this function takes an instance ID, not an Instance, so
that it can be called by monitor.
Returns a list of all block devices for this domain.
"""
domain = self._conn.lookupByName(instance_id)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
@@ -301,14 +316,7 @@ class LibvirtConnection(object):
return disks
def get_interfaces(self, instance_id):
"""
Note that this function takes an instance ID, not an Instance, so
that it can be called by monitor.
Returns a list of all network interfaces for this instance.
"""
domain = self._conn.lookupByName(instance_id)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
@@ -344,20 +352,10 @@ class LibvirtConnection(object):
return interfaces
def block_stats(self, instance_id, disk):
"""
Note that this function takes an instance ID, not an Instance, so
that it can be called by monitor.
"""
domain = self._conn.lookupByName(instance_id)
return domain.blockStats(disk)
def interface_stats(self, instance_id, interface):
"""
Note that this function takes an instance ID, not an Instance, so
that it can be called by monitor.
"""
domain = self._conn.lookupByName(instance_id)
return domain.interfaceStats(interface)
+150 -19
View File
@@ -19,6 +19,7 @@ A connection to XenServer or Xen Cloud Platform.
"""
import logging
import xmlrpclib
from twisted.internet import defer
from twisted.internet import task
@@ -26,20 +27,35 @@ from twisted.internet import task
from nova import exception
from nova import flags
from nova import process
from nova.auth.manager import AuthManager
from nova.compute import power_state
from nova.virt import images
XenAPI = None
FLAGS = flags.FLAGS
flags.DEFINE_string('xenapi_connection_url',
None,
'URL for connection to XenServer/Xen Cloud Platform. Required if connection_type=xenapi.')
'URL for connection to XenServer/Xen Cloud Platform.'
' Required if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_username',
'root',
'Username for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.')
'Username for connection to XenServer/Xen Cloud Platform.'
' Used only if connection_type=xenapi.')
flags.DEFINE_string('xenapi_connection_password',
None,
'Password for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.')
'Password for connection to XenServer/Xen Cloud Platform.'
' Used only if connection_type=xenapi.')
XENAPI_POWER_STATE = {
'Halted' : power_state.SHUTDOWN,
'Running' : power_state.RUNNING,
'Paused' : power_state.PAUSED,
'Suspended': power_state.SHUTDOWN, # FIXME
'Crashed' : power_state.CRASHED
}
def get_connection(_):
@@ -59,7 +75,6 @@ def get_connection(_):
class XenAPIConnection(object):
def __init__(self, url, user, pw):
self._conn = XenAPI.Session(url)
self._conn.login_with_password(user, pw)
@@ -71,10 +86,40 @@ class XenAPIConnection(object):
@defer.inlineCallbacks
@exception.wrap_exception
def spawn(self, instance):
vm = self.lookup(instance.name)
vm = yield self.lookup(instance.name)
if vm is not None:
raise Exception('Attempted to create non-unique name %s' %
instance.name)
if 'bridge_name' in instance.datamodel:
network_ref = \
yield self._find_network_with_bridge(
instance.datamodel['bridge_name'])
else:
network_ref = None
if 'mac_address' in instance.datamodel:
mac_address = instance.datamodel['mac_address']
else:
mac_address = ''
user = AuthManager().get_user(instance.datamodel['user_id'])
project = AuthManager().get_project(instance.datamodel['project_id'])
vdi_uuid = yield self.fetch_image(
instance.datamodel['image_id'], user, project, True)
kernel = yield self.fetch_image(
instance.datamodel['kernel_id'], user, project, False)
ramdisk = yield self.fetch_image(
instance.datamodel['ramdisk_id'], user, project, False)
vdi_ref = yield self._conn.xenapi.VDI.get_by_uuid(vdi_uuid)
vm_ref = yield self.create_vm(instance, kernel, ramdisk)
yield self.create_vbd(vm_ref, vdi_ref, 0, True)
if network_ref:
yield self._create_vif(vm_ref, network_ref, mac_address)
yield self._conn.xenapi.VM.start(vm_ref, False, False)
def create_vm(self, instance, kernel, ramdisk):
mem = str(long(instance.datamodel['memory_kb']) * 1024)
vcpus = str(instance.datamodel['vcpus'])
rec = {
@@ -92,9 +137,9 @@ class XenAPIConnection(object):
'actions_after_reboot': 'restart',
'actions_after_crash': 'destroy',
'PV_bootloader': '',
'PV_kernel': instance.datamodel['kernel_id'],
'PV_ramdisk': instance.datamodel['ramdisk_id'],
'PV_args': '',
'PV_kernel': kernel,
'PV_ramdisk': ramdisk,
'PV_args': 'root=/dev/xvda1',
'PV_bootloader_args': '',
'PV_legacy_args': '',
'HVM_boot_policy': '',
@@ -106,9 +151,74 @@ class XenAPIConnection(object):
'user_version': '0',
'other_config': {},
}
vm = yield self._conn.xenapi.VM.create(rec)
#yield self._conn.xenapi.VM.start(vm, False, False)
logging.debug('Created VM %s...', instance.name)
vm_ref = self._conn.xenapi.VM.create(rec)
logging.debug('Created VM %s as %s.', instance.name, vm_ref)
return vm_ref
def create_vbd(self, vm_ref, vdi_ref, userdevice, bootable):
vbd_rec = {}
vbd_rec['VM'] = vm_ref
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = str(userdevice)
vbd_rec['bootable'] = bootable
vbd_rec['mode'] = 'RW'
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref)
vbd_ref = self._conn.xenapi.VBD.create(vbd_rec)
logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref,
vdi_ref)
return vbd_ref
def _create_vif(self, vm_ref, network_ref, mac_address):
vif_rec = {}
vif_rec['device'] = '0'
vif_rec['network']= network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
vif_rec['MTU'] = '1500'
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref,
network_ref)
vif_ref = self._conn.xenapi.VIF.create(vif_rec)
logging.debug('Created VIF %s for VM %s, network %s.', vif_ref,
vm_ref, network_ref)
return vif_ref
def _find_network_with_bridge(self, bridge):
expr = 'field "bridge" = "%s"' % bridge
networks = self._conn.xenapi.network.get_all_records_where(expr)
if len(networks) == 1:
return networks.keys()[0]
elif len(networks) > 1:
raise Exception('Found non-unique network for bridge %s' % bridge)
else:
raise Exception('Found no network for bridge %s' % bridge)
def fetch_image(self, image, user, project, use_sr):
"""use_sr: True to put the image as a VDI in an SR, False to place
it on dom0's filesystem. The former is for VM disks, the latter for
its kernel and ramdisk (if external kernels are being used)."""
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
logging.debug("Asking xapi to fetch %s as %s" % (url, access))
fn = use_sr and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = user.secret
if use_sr:
args['add_partition'] = 'true'
return self._call_plugin('objectstore', fn, args)
def reboot(self, instance):
vm = self.lookup(instance.name)
@@ -125,9 +235,9 @@ class XenAPIConnection(object):
def get_info(self, instance_id):
vm = self.lookup(instance_id)
if vm is None:
raise Exception('instance not present %s' % instance.name)
raise Exception('instance not present %s' % instance_id)
rec = self._conn.xenapi.VM.get_record(vm)
return {'state': power_state_from_xenapi[rec['power_state']],
return {'state': XENAPI_POWER_STATE[rec['power_state']],
'max_mem': long(rec['memory_static_max']) >> 10,
'mem': long(rec['memory_dynamic_max']) >> 10,
'num_cpu': rec['VCPUs_max'],
@@ -143,10 +253,31 @@ class XenAPIConnection(object):
else:
return vms[0]
power_state_from_xenapi = {
'Halted' : power_state.RUNNING, #FIXME
'Running' : power_state.RUNNING,
'Paused' : power_state.PAUSED,
'Suspended': power_state.SHUTDOWN, # FIXME
'Crashed' : power_state.CRASHED
}
    def _call_plugin(self, plugin, fn, args):
        """Invoke `fn` in the named xapi plugin on this host, translating
        plugin-raised failures via _unwrap_plugin_exceptions."""
        return _unwrap_plugin_exceptions(
            self._conn.xenapi.host.call_plugin,
            self._get_xenapi_host(), plugin, fn, args)
    def _get_xenapi_host(self):
        """Return the opaque ref of the host this XenAPI session is bound to."""
        return self._conn.xenapi.session.get_this_host(self._conn.handle)
def _unwrap_plugin_exceptions(func, *args, **kwargs):
    """Call func(*args, **kwargs), re-raising plugin-wrapped failures.

    xapi plugins report errors as a XenAPI.Failure whose details are
    ['XENAPI_PLUGIN_EXCEPTION', <...>, 'Failure', <repr of params>]; this
    unwraps such failures into a plain XenAPI.Failure carrying the inner
    params.  Everything else is logged and re-raised unchanged.
    (Python 2 `except X, e` syntax.)
    """
    try:
        return func(*args, **kwargs)
    except XenAPI.Failure, exn:
        logging.debug("Got exception: %s", exn)
        if (len(exn.details) == 4 and
            exn.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
            exn.details[2] == 'Failure'):
            params = None
            try:
                # NOTE(review): eval of data produced by the plugin; safe
                # only while the plugin is trusted -- confirm, or switch to
                # ast.literal_eval.
                params = eval(exn.details[3])
            except:
                # Unparseable details: surface the original failure.
                raise exn
            raise XenAPI.Failure(params)
        else:
            raise
    except xmlrpclib.ProtocolError, exn:
        logging.debug("Got exception: %s", exn)
        raise
+73 -61
View File
@@ -22,12 +22,8 @@ destroying persistent storage volumes, ala EBS.
Currently uses Ata-over-Ethernet.
"""
import glob
import logging
import os
import shutil
import socket
import tempfile
from twisted.internet import defer
@@ -47,9 +43,6 @@ flags.DEFINE_string('volume_group', 'nova-volumes',
'Name for the VG that will contain exported volumes')
flags.DEFINE_string('aoe_eth_dev', 'eth0',
'Which device to export the volumes on')
flags.DEFINE_string('storage_name',
socket.gethostname(),
'name of this service')
flags.DEFINE_integer('first_shelf_id',
utils.last_octet(utils.get_my_ip()) * 10,
'AoE starting shelf_id for this service')
@@ -59,9 +52,9 @@ flags.DEFINE_integer('last_shelf_id',
flags.DEFINE_string('aoe_export_dir',
'/var/lib/vblade-persist/vblades',
'AoE directory where exports are created')
flags.DEFINE_integer('slots_per_shelf',
flags.DEFINE_integer('blades_per_shelf',
16,
'Number of AoE slots per shelf')
'Number of AoE blades per shelf')
flags.DEFINE_string('storage_availability_zone',
'nova',
'availability zone of this service')
@@ -69,18 +62,21 @@ flags.DEFINE_boolean('fake_storage', False,
'Should we make real storage volumes to attach?')
class NoMoreVolumes(exception.Error):
class NoMoreBlades(exception.Error):
pass
def get_volume(volume_id):
""" Returns a redis-backed volume object """
volume_class = Volume
if FLAGS.fake_storage:
volume_class = FakeVolume
if datastore.Redis.instance().sismember('volumes', volume_id):
return volume_class(volume_id=volume_id)
vol = volume_class.lookup(volume_id)
if vol:
return vol
raise exception.Error("Volume does not exist")
class VolumeService(service.Service):
"""
There is one VolumeNode running on each host.
@@ -91,18 +87,9 @@ class VolumeService(service.Service):
super(VolumeService, self).__init__()
self.volume_class = Volume
if FLAGS.fake_storage:
FLAGS.aoe_export_dir = tempfile.mkdtemp()
self.volume_class = FakeVolume
self._init_volume_group()
def __del__(self):
# TODO(josh): Get rid of this destructor, volumes destroy themselves
if FLAGS.fake_storage:
try:
shutil.rmtree(FLAGS.aoe_export_dir)
except Exception, err:
pass
@defer.inlineCallbacks
@validate.rangetest(size=(0, 1000))
def create_volume(self, size, user_id, project_id):
@@ -113,8 +100,6 @@ class VolumeService(service.Service):
"""
logging.debug("Creating volume of size: %s" % (size))
vol = yield self.volume_class.create(size, user_id, project_id)
datastore.Redis.instance().sadd('volumes', vol['volume_id'])
datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
logging.debug("restarting exports")
yield self._restart_exports()
defer.returnValue(vol['volume_id'])
@@ -134,23 +119,22 @@ class VolumeService(service.Service):
def delete_volume(self, volume_id):
logging.debug("Deleting volume with id of: %s" % (volume_id))
vol = get_volume(volume_id)
if vol['status'] == "attached":
if vol['attach_status'] == "attached":
raise exception.Error("Volume is still attached")
if vol['node_name'] != FLAGS.storage_name:
if vol['node_name'] != FLAGS.node_name:
raise exception.Error("Volume is not local to this node")
yield vol.destroy()
datastore.Redis.instance().srem('volumes', vol['volume_id'])
datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
defer.returnValue(True)
@defer.inlineCallbacks
def _restart_exports(self):
if FLAGS.fake_storage:
return
yield process.simple_execute("sudo vblade-persist auto all")
# NOTE(vish): this command sometimes sends output to stderr for warnings
yield process.simple_execute("sudo vblade-persist start all",
terminate_on_stderr=False)
# NOTE(vish): these commands sometimes sends output to stderr for warnings
yield process.simple_execute( "sudo vblade-persist auto all",
terminate_on_stderr=False)
yield process.simple_execute( "sudo vblade-persist start all",
terminate_on_stderr=False)
@defer.inlineCallbacks
def _init_volume_group(self):
@@ -162,6 +146,7 @@ class VolumeService(service.Service):
"sudo vgcreate %s %s" % (FLAGS.volume_group,
FLAGS.storage_dev))
class Volume(datastore.BasicModel):
def __init__(self, volume_id=None):
@@ -173,14 +158,15 @@ class Volume(datastore.BasicModel):
return self.volume_id
def default_state(self):
return {"volume_id": self.volume_id}
return {"volume_id": self.volume_id,
"node_name": "unassigned"}
@classmethod
@defer.inlineCallbacks
def create(cls, size, user_id, project_id):
volume_id = utils.generate_uid('vol')
vol = cls(volume_id)
vol['node_name'] = FLAGS.storage_name
vol['node_name'] = FLAGS.node_name
vol['size'] = size
vol['user_id'] = user_id
vol['project_id'] = project_id
@@ -226,14 +212,31 @@ class Volume(datastore.BasicModel):
self['attach_status'] = "detached"
self.save()
def save(self):
is_new = self.is_new_record()
super(Volume, self).save()
if is_new:
redis = datastore.Redis.instance()
key = self.__devices_key
# TODO(vish): these should be added by admin commands
more = redis.scard(self._redis_association_name("node",
self['node_name']))
if (not redis.exists(key) and not more):
for shelf_id in range(FLAGS.first_shelf_id,
FLAGS.last_shelf_id + 1):
for blade_id in range(FLAGS.blades_per_shelf):
redis.sadd(key, "%s.%s" % (shelf_id, blade_id))
self.associate_with("node", self['node_name'])
@defer.inlineCallbacks
def destroy(self):
try:
yield self._remove_export()
except Exception as ex:
logging.debug("Ingnoring failure to remove export %s" % ex)
pass
yield self._remove_export()
yield self._delete_lv()
self.unassociate_with("node", self['node_name'])
if self.get('shelf_id', None) and self.get('blade_id', None):
redis = datastore.Redis.instance()
key = self.__devices_key
redis.sadd(key, "%s.%s" % (self['shelf_id'], self['blade_id']))
super(Volume, self).destroy()
@defer.inlineCallbacks
@@ -245,66 +248,75 @@ class Volume(datastore.BasicModel):
yield process.simple_execute(
"sudo lvcreate -L %s -n %s %s" % (sizestr,
self['volume_id'],
FLAGS.volume_group))
FLAGS.volume_group),
check_exit_code=True)
@defer.inlineCallbacks
def _delete_lv(self):
yield process.simple_execute(
"sudo lvremove -f %s/%s" % (FLAGS.volume_group,
self['volume_id']))
self['volume_id']),
check_exit_code=True)
@property
def __devices_key(self):
return 'volume_devices:%s' % FLAGS.node_name
@defer.inlineCallbacks
def _setup_export(self):
(shelf_id, blade_id) = get_next_aoe_numbers()
redis = datastore.Redis.instance()
key = self.__devices_key
device = redis.spop(key)
if not device:
raise NoMoreBlades()
(shelf_id, blade_id) = device.split('.')
self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id)
self['shelf_id'] = shelf_id
self['blade_id'] = blade_id
self.save()
yield self._exec_export()
yield self._exec_setup_export()
@defer.inlineCallbacks
def _exec_export(self):
def _exec_setup_export(self):
yield process.simple_execute(
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
(self['shelf_id'],
self['blade_id'],
FLAGS.aoe_eth_dev,
FLAGS.volume_group,
self['volume_id']))
self['volume_id']),
check_exit_code=True)
@defer.inlineCallbacks
def _remove_export(self):
if not self.get('shelf_id', None) or not self.get('blade_id', None):
defer.returnValue(False)
yield self._exec_remove_export()
defer.returnValue(True)
@defer.inlineCallbacks
def _exec_remove_export(self):
yield process.simple_execute(
"sudo vblade-persist stop %s %s" % (self['shelf_id'],
self['blade_id']))
self['blade_id']),
check_exit_code=True)
yield process.simple_execute(
"sudo vblade-persist destroy %s %s" % (self['shelf_id'],
self['blade_id']))
self['blade_id']),
check_exit_code=True)
class FakeVolume(Volume):
def _create_lv(self):
pass
def _exec_export(self):
def _exec_setup_export(self):
fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])
f = file(fname, "w")
f.close()
def _remove_export(self):
pass
def _exec_remove_export(self):
os.unlink(os.path.join(FLAGS.aoe_export_dir, self['aoe_device']))
def _delete_lv(self):
pass
def get_next_aoe_numbers():
for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1):
aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id))
if not aoes:
blade_id = 0
else:
blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1
if blade_id < FLAGS.slots_per_shelf:
logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id)
return (shelf_id, blade_id)
raise NoMoreVolumes()
+274
View File
@@ -0,0 +1,274 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers
"""
import logging
import sys
import eventlet
import eventlet.wsgi
eventlet.patcher.monkey_patch(all=False, socket=True)
import routes
import routes.middleware
import webob.dec
import webob.exc
logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
def run_server(application, port):
    """Serve `application` on every interface at `port`; blocks forever."""
    listener = eventlet.listen(('0.0.0.0', port))
    eventlet.wsgi.server(listener, application)
class Application(object):
    # TODO(gundlach): consider removing this class -- it carries no
    # behavior beyond documenting the contract.
    """Base class for WSGI applications; subclasses must override __call__.

    A typical override decorates itself with @webob.dec.wsgify, in which
    case it may return any of: a plain string (the response body), a
    webob.exc HTTP exception (a formatted error page), a webob Response
    (for header/iterator control), or another WSGI app to invoke next
    (optionally via req.get_response(app) to inspect it first).  It may
    instead set req.response and return None.  See
    http://pythonpaste.org/webob/modules/dec.html for the full story.
    """

    def __call__(self, environ, start_response):
        """Subclasses implement the WSGI contract here."""
        raise NotImplementedError("You must implement __call__")
class Middleware(Application):  # pylint: disable-msg=W0223
    """Base class for WSGI middleware.

    Holds a reference to the next application in the chain; the default
    behavior is a pure pass-through.  Subclasses override __call__ to
    inspect or modify requests and responses on the way past.
    """

    def __init__(self, application):  # pylint: disable-msg=W0231
        self.application = application

    @webob.dec.wsgify
    def __call__(self, req):
        """Delegate the request to the wrapped application unchanged."""
        return self.application
class Debug(Middleware):
    """Helper class that can be inserted into any WSGI application chain
    to get information about the request and response.

    Dumps the request environ, response headers, and response body to
    stdout as the request flows through.  (Python 2 print statements.)
    """
    @webob.dec.wsgify
    def __call__(self, req):
        print ("*" * 40) + " REQUEST ENVIRON"
        for key, value in req.environ.items():
            print key, "=", value
        print
        resp = req.get_response(self.application)
        print ("*" * 40) + " RESPONSE HEADERS"
        # NOTE(review): unpacking assumes iterating resp.headers yields
        # (key, value) pairs -- confirm against the webob version in use.
        for (key, value) in resp.headers:
            print key, "=", value
        print
        # Wrap the body iterator so the body is echoed lazily, only as the
        # server actually consumes it.
        resp.app_iter = self.print_generator(resp.app_iter)
        return resp
    @staticmethod
    def print_generator(app_iter):
        """
        Iterator that prints the contents of a wrapper string iterator
        when iterated.
        """
        print ("*" * 40) + "BODY"
        for part in app_iter:
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print
class Router(object):
    """WSGI middleware that dispatches incoming requests to WSGI apps.

    Every route registered on the routes.Mapper must name a 'controller'
    (a WSGI app to call).  Usually you also give an 'action' and make the
    controller a wsgi.Controller, which forwards the request to that
    action method.

    Examples:
      mapper = routes.Mapper()
      sc = ServerController()

      # Explicit mapping of one route to a controller+action
      mapper.connect(None, "/svrlist", controller=sc, action="list")

      # Actions are all implicitly defined
      mapper.resource("server", "servers", controller=sc)

      # Pointing to an arbitrary WSGI app.  You can specify the
      # {path_info:.*} parameter so the target app can be handed just that
      # section of the URL.
      mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
    """

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper."""
        self.map = mapper
        # RoutesMiddleware matches each request against self.map, records
        # the result in the WSGI environ, then calls self._dispatch.
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify
    def __call__(self, req):
        """Hand the request to the routing middleware; 404s on no match."""
        return self._router

    @webob.dec.wsgify
    def _dispatch(self, req):
        """Return the matched route's controller app, or a 404.

        Invoked by self._router once the routing information has been
        placed in req.environ['wsgiorg.routing_args'].
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if match:
            return match['controller']
        return webob.exc.HTTPNotFound()
class Controller(object):
    """WSGI app driven by RoutesMiddleware match data.

    Reads the routing match from the environ and invokes the matched
    action method on itself.  Every action method receives the remaining
    route parameters as keyword arguments, plus the incoming
    webob.Request as 'req'.
    """

    @webob.dec.wsgify
    def __call__(self, req):
        """Dispatch to the action method named in the routing args."""
        params = req.environ['wsgiorg.routing_args'][1]
        handler = getattr(self, params.pop('action'))
        del params['controller']
        params['req'] = req
        return handler(**params)
class Serializer(object):
    """
    Serializes a dictionary to a Content Type specified by a WSGI environment.
    """
    def __init__(self, environ, metadata=None):
        """
        Create a serializer based on the given WSGI environment.
        'metadata' is an optional dict mapping MIME types to information
        needed to serialize a dictionary to that type.
        """
        self.environ = environ
        self.metadata = metadata or {}
    def to_content_type(self, data):
        """
        Serialize a dictionary into a string. The format of the string
        will be decided based on the Content Type requested in self.environ:
        by Accept: header, or by URL suffix.
        """
        # NOTE(review): mimetype is currently hard-coded; self.environ is
        # not consulted yet despite the docstring -- see TODO below.
        mimetype = 'application/xml'
        # TODO(gundlach): determine mimetype from request
        if mimetype == 'application/json':
            import json
            return json.dumps(data)
        elif mimetype == 'application/xml':
            metadata = self.metadata.get('application/xml', {})
            # We expect data to contain a single key which is the XML root.
            # (.keys()[0] is Python 2 only.)
            root_key = data.keys()[0]
            from xml.dom import minidom
            doc = minidom.Document()
            node = self._to_xml_node(doc, metadata, root_key, data[root_key])
            return node.toprettyxml(indent='    ')
        else:
            # Fallback for content types we have no serializer for.
            return repr(data)
    def _to_xml_node(self, doc, metadata, nodename, data):
        """Recursively convert `data` into a DOM element named `nodename`.

        Lists become repeated child elements (singular name taken from
        metadata['plurals'], else nodename minus a trailing 's', else
        'item'); dicts become attributes (per metadata['attributes']) or
        child elements; anything else becomes a text node.
        """
        result = doc.createElement(nodename)
        if type(data) is list:
            singular = metadata.get('plurals', {}).get(nodename, None)
            if singular is None:
                if nodename.endswith('s'):
                    singular = nodename[:-1]
                else:
                    singular = 'item'
            for item in data:
                node = self._to_xml_node(doc, metadata, singular, item)
                result.appendChild(node)
        elif type(data) is dict:
            attrs = metadata.get('attributes', {}).get(nodename, {})
            for k,v in data.items():
                if k in attrs:
                    result.setAttribute(k, str(v))
                else:
                    node = self._to_xml_node(doc, metadata, k, v)
                    result.appendChild(node)
        else: # atom
            node = doc.createTextNode(str(data))
            result.appendChild(node)
        return result
+6
View File
@@ -0,0 +1,6 @@
This directory contains files that are required for the XenAPI support. They
should be installed in the XenServer / Xen Cloud Platform domain 0.
You also need to make the plugin executable:

    chmod u+x /etc/xapi.d/plugins/objectstore
@@ -0,0 +1,231 @@
#!/usr/bin/env python
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# XenAPI plugin for fetching images from nova-objectstore.
#
import base64
import errno
import hmac
import os
import os.path
import sha
import time
import urlparse
import XenAPIPlugin
from pluginlib_nova import *
configure_logging('objectstore')
KERNEL_DIR = '/boot/guest'
DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024
SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
def get_vdi(session, args):
    """Fetch an image from nova-objectstore into a new VDI on local storage.

    Expects args: src_url, username, password, and an optional boolean
    string add_partition.  Returns the new VDI's uuid.
    """
    src_url = exists(args, 'src_url')
    username = exists(args, 'username')
    password = exists(args, 'password')
    add_partition = validate_bool(args, 'add_partition', 'false')
    proto, netloc, url_path = urlparse.urlparse(src_url)[:3]
    sr = find_sr(session)
    if sr is None:
        raise Exception('Cannot find SR to write VDI to')
    virtual_size = \
        get_content_length(proto, netloc, url_path, username, password)
    if virtual_size < 0:
        raise Exception('Cannot get VDI size')
    # When partitioning, reserve extra room at the front for the MBR.
    vdi_size = virtual_size + (add_partition and MBR_SIZE_BYTES or 0)
    vdi = create_vdi(session, sr, src_url, vdi_size, False)
    # Plug the VDI into dom0 and stream the image onto its device node.
    with_vdi_in_dom0(session, vdi, False,
                     lambda dev: get_vdi_(proto, netloc, url_path,
                                          username, password, add_partition,
                                          virtual_size, '/dev/%s' % dev))
    return session.xenapi.VDI.get_uuid(vdi)
def get_vdi_(proto, netloc, url_path, username, password, add_partition,
             virtual_size, dest):
    """Stream the image onto device `dest`, partitioning it first if asked."""
    if add_partition:
        write_partition(virtual_size, dest)
    # With a partition table in place, the image data starts after the MBR.
    start = add_partition and MBR_SIZE_BYTES or 0
    get(proto, netloc, url_path, username, password, dest, start)
def write_partition(virtual_size, dest):
    """Write an msdos label and one primary partition to block device `dest`.

    The partition starts after the MBR (MBR_SIZE_SECTORS) and is sized to
    hold `virtual_size` bytes.  Raises Exception if either parted command
    fails.  Fix: dropped the unused `mbr_last` local.
    """
    primary_first = MBR_SIZE_SECTORS
    # NOTE: '/' is integer division under Python 2, which this plugin targets.
    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
    logging.debug('Writing partition table %d %d to %s...',
                  primary_first, primary_last, dest)
    # dest is a dom0-local device path produced by this plugin, not user
    # input; still, note these shell out via os.system with interpolation.
    if os.system('parted --script %s mklabel msdos' % dest) != 0:
        raise Exception('Failed to mklabel')
    if os.system('parted --script %s mkpart primary %ds %ds' %
                 (dest, primary_first, primary_last)) != 0:
        raise Exception('Failed to mkpart')
    logging.debug('Writing partition table %s done.', dest)
def find_sr(session):
    """Return the local-storage SR plugged into this host, or None."""
    host = get_this_host(session)
    for sr in session.xenapi.SR.get_all():
        sr_rec = session.xenapi.SR.get_record(sr)
        # Only the SR that XenServer tags as local storage qualifies.
        if sr_rec['other_config'].get('i18n-key') != 'local-storage':
            continue
        for pbd in sr_rec['PBDs']:
            pbd_rec = session.xenapi.PBD.get_record(pbd)
            if pbd_rec['host'] == host:
                return sr
    return None
def get_kernel(session, args):
    """Download a kernel/ramdisk image into dom0's filesystem.

    Fetches args['src_url'] (authenticated with args['username'] and
    args['password']) to a path under KERNEL_DIR mirroring the URL path,
    and returns that destination path.  (Python 2 `except X, e` syntax.)
    """
    src_url = exists(args, 'src_url')
    username = exists(args, 'username')
    password = exists(args, 'password')
    (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)
    dest = os.path.join(KERNEL_DIR, url_path[1:])
    # Paranoid check against people using ../ to do rude things.
    if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR:
        raise Exception('Illegal destination %s %s', (url_path, dest))
    dirname = os.path.dirname(dest)
    try:
        os.makedirs(dirname)
    except os.error, e:
        # An already-existing directory is fine; anything else is fatal.
        if e.errno != errno.EEXIST:
            raise
    if not os.path.isdir(dirname):
        raise Exception('Cannot make directory %s', dirname)
    try:
        # Drop any stale copy so the download below starts clean.
        os.remove(dest)
    except:
        pass
    get(proto, netloc, url_path, username, password, dest, 0)
    return dest
def get_content_length(proto, netloc, url_path, username, password):
    """Return the remote object's Content-Length via an authenticated HEAD."""
    head_headers = make_headers('HEAD', url_path, username, password)
    return with_http_connection(
        proto, netloc,
        lambda conn: get_content_length_(url_path, head_headers, conn))
def get_content_length_(url_path, headers, conn):
    """Issue a HEAD request on an open connection and return the
    Content-Length as a long, or -1 if the header is absent.

    Raises Exception on any non-200 status.
    """
    conn.request('HEAD', url_path, None, headers)
    response = conn.getresponse()
    if response.status != 200:
        raise Exception('%d %s' % (response.status, response.reason))
    # `long` is Python 2 only.
    return long(response.getheader('Content-Length', -1))
def get(proto, netloc, url_path, username, password, dest, offset):
    """GET url_path from netloc and write the body into dest at offset."""
    auth_headers = make_headers('GET', url_path, username, password)
    download(proto, netloc, url_path, auth_headers, dest, offset)
def make_headers(verb, url_path, username, password):
    """Build the Date and S3-style Authorization headers for a request."""
    headers = {'Date': time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                     time.gmtime())}
    # The signature covers the headers assembled so far (just Date).
    signature = s3_authorization(verb, url_path, password, headers)
    headers['Authorization'] = 'AWS %s:%s' % (username, signature)
    return headers
def s3_authorization(verb, path, password, headers):
    """Compute the S3-style request signature: base64(HMAC-SHA1(secret,
    canonical-string)).

    NOTE(review): digestmod is the Python 2-only `sha` module.
    """
    sha1 = hmac.new(password, digestmod=sha)
    sha1.update(plaintext(verb, path, headers))
    return base64.encodestring(sha1.digest()).strip()
def plaintext(verb, path, headers):
    """Build the canonical string that s3_authorization signs:
    verb, two blank lines, the header values, then the path."""
    header_lines = "\n".join(headers[name] for name in headers)
    return '%s\n\n\n%s\n%s' % (verb, header_lines, path)
def download(proto, netloc, url_path, headers, dest, offset):
    """Open an HTTP(S) connection to netloc and stream url_path into the
    file at dest, starting at byte offset."""
    with_http_connection(
        proto, netloc,
        lambda conn: download_(url_path, dest, offset, headers, conn))
def download_(url_path, dest, offset, headers, conn):
    """Issue the GET on an open connection and copy the body into `dest`.

    Raises Exception on any non-200 status.
    """
    conn.request('GET', url_path, None, headers)
    response = conn.getresponse()
    if response.status != 200:
        raise Exception('%d %s' % (response.status, response.reason))
    # BUG FIX: getheader() returns the header value as a *string*; the
    # previous code handed it straight to download_all, whose
    # `i >= length` early-exit silently never fired when comparing an int
    # against a str.  Coerce to int; an absent header keeps the -1
    # "unknown length" sentinel.
    length = int(response.getheader('Content-Length', -1))
    with_file(
        dest, 'a',
        lambda dest_file: download_all(response, length, dest_file, offset))
def download_all(response, length, dest_file, offset):
    """Copy the HTTP response body into dest_file starting at `offset`.

    Reads DOWNLOAD_CHUNK_SIZE at a time; stops on an empty read or once
    `length` bytes have been copied (length == -1 means unbounded).
    """
    dest_file.seek(offset)
    copied = 0
    while True:
        chunk = response.read(DOWNLOAD_CHUNK_SIZE)
        if not chunk:
            return
        dest_file.write(chunk)
        copied += len(chunk)
        if length != -1 and copied >= length:
            return
if __name__ == '__main__':
    # Entry point when xapi invokes this plugin: map call names to handlers.
    XenAPIPlugin.dispatch({'get_vdi': get_vdi,
                           'get_kernel': get_kernel})
+216
View File
@@ -0,0 +1,216 @@
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Helper functions for the Nova xapi plugins. In time, this will merge
# with the pluginlib.py shipped with xapi, but for now, that file is not
# very stable, so it's easiest just to have a copy of all the functions
# that we need.
#
import httplib
import logging
import logging.handlers
import re
import time
##### Logging setup
def configure_logging(name):
    """Route the root logger to syslog at DEBUG level, tagged with `name`."""
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    handler = logging.handlers.SysLogHandler('/dev/log')
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(
        logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name))
    root.addHandler(handler)
##### Exceptions
class PluginError(Exception):
    """Root of the plugin error hierarchy."""

    def __init__(self, *args):
        super(PluginError, self).__init__(*args)


class ArgumentError(PluginError):
    """Signals missing, malformed, or mutually incompatible RPC arguments."""

    def __init__(self, *args):
        super(ArgumentError, self).__init__(*args)
##### Helpers
def ignore_failure(func, *args, **kwargs):
    """Call func, logging and swallowing XenAPI.Failure (returns None then).

    NOTE(review): this module imports only httplib, logging, re and time,
    so the `XenAPI` name in the except clause would raise NameError if
    func actually failed -- confirm whether the xapi plugin runtime
    injects XenAPI, or add the import.
    """
    try:
        return func(*args, **kwargs)
    except XenAPI.Failure, e:
        logging.error('Ignoring XenAPI.Failure %s', e)
        return None
##### Argument validation
# Shell-safe argument values: alphanumerics plus _ : . - ,
ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$')


def validate_exists(args, key, default=None):
    """Return args[key] after checking it is a shell-safe string.

    Falls back to `default` when the key is absent.  Raises ArgumentError
    when the key is absent with no default, or when the value is empty,
    contains characters outside ARGUMENT_PATTERN, or starts with a hyphen
    (which could be mistaken for a command-line option).
    """
    if key not in args:
        if default is None:
            raise ArgumentError('Argument %s is required.' % key)
        return default
    value = args[key]
    if len(value) == 0:
        raise ArgumentError('Argument %r value %r is too short.' % (key, value))
    if not ARGUMENT_PATTERN.match(value):
        raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, value))
    if value[0] == '-':
        raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, value))
    return value
def validate_bool(args, key, default=None):
    """Return args[key] parsed as a boolean ('true'/'false', any case).

    `default` (a string) is used when the key is absent; anything other
    than 'true'/'false' raises ArgumentError.
    """
    value = validate_exists(args, key, default)
    lowered = value.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value))
def exists(args, key):
    """Return the freeform value stored under `key`, which must be present.

    Raises ArgumentError when the key is missing.
    """
    try:
        return args[key]
    except KeyError:
        raise ArgumentError('Argument %s is required.' % key)
def optional(args, key):
    """If the given key is in args, return the corresponding value,
    otherwise return None.

    Fix: the previous `key in args and args[key] or None` idiom returned
    None for falsy-but-present values such as '', contradicting the
    documented contract; dict.get matches it exactly.
    """
    return args.get(key)
def get_this_host(session):
    """Return the opaque ref of the host this session is connected to."""
    return session.xenapi.session.get_this_host(session.handle)
def get_domain_0(session):
    """Return the opaque ref of dom0 (the control domain) on this host."""
    this_host_ref = get_this_host(session)
    # Filter VM records down to the control domain resident on this host;
    # exactly one record is expected.  (.keys()[0] is Python 2 only.)
    expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref
    return session.xenapi.VM.get_all_records_where(expr).keys()[0]
def create_vdi(session, sr_ref, name_label, virtual_size, read_only):
    """Create a user VDI of `virtual_size` bytes in `sr_ref`; return its ref."""
    record = {
        'name_label': name_label,
        'name_description': '',
        'SR': sr_ref,
        'virtual_size': str(virtual_size),
        'type': 'User',
        'sharable': False,
        'read_only': read_only,
        'xenstore_data': {},
        'other_config': {},
        'sm_config': {},
        'tags': [],
    }
    vdi_ref = session.xenapi.VDI.create(record)
    logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label,
                  virtual_size, read_only, sr_ref)
    return vdi_ref
def with_vdi_in_dom0(session, vdi, read_only, f):
    """Temporarily plug `vdi` into dom0, call f(device_name), then clean up.

    Creates a VBD attaching the VDI to the control domain, plugs it, and
    calls f with the value of VBD.get_device.  The VBD is always
    unplugged (with retry) and destroyed afterwards, even if f raises.
    Returns f's return value.
    """
    dom0 = get_domain_0(session)
    vbd_rec = {}
    vbd_rec['VM'] = dom0
    vbd_rec['VDI'] = vdi
    vbd_rec['userdevice'] = 'autodetect'
    vbd_rec['bootable'] = False
    # and/or idiom: read-only VBD when requested, read-write otherwise.
    vbd_rec['mode'] = read_only and 'RO' or 'RW'
    vbd_rec['type'] = 'disk'
    vbd_rec['unpluggable'] = True
    vbd_rec['empty'] = False
    vbd_rec['other_config'] = {}
    vbd_rec['qos_algorithm_type'] = ''
    vbd_rec['qos_algorithm_params'] = {}
    vbd_rec['qos_supported_algorithms'] = []
    logging.debug('Creating VBD for VDI %s ... ', vdi)
    vbd = session.xenapi.VBD.create(vbd_rec)
    logging.debug('Creating VBD for VDI %s done.', vdi)
    try:
        logging.debug('Plugging VBD %s ... ', vbd)
        session.xenapi.VBD.plug(vbd)
        logging.debug('Plugging VBD %s done.', vbd)
        return f(session.xenapi.VBD.get_device(vbd))
    finally:
        logging.debug('Destroying VBD for VDI %s ... ', vdi)
        # Unplug may be rejected while the device is still busy; retry,
        # then destroy the VBD best-effort.
        vbd_unplug_with_retry(session, vbd)
        ignore_failure(session.xenapi.VBD.destroy, vbd)
        logging.debug('Destroying VBD for VDI %s done.', vdi)
def vbd_unplug_with_retry(session, vbd):
    """Call VBD.unplug on the given VBD, with a retry if we get
    DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
    seeing the device still in use, even when all processes using the device
    should be dead.  (Python 2 `except X, e` syntax.)"""
    while True:
        try:
            session.xenapi.VBD.unplug(vbd)
            logging.debug('VBD.unplug successful first time.')
            return
        except XenAPI.Failure, e:
            if (len(e.details) > 0 and
                e.details[0] == 'DEVICE_DETACH_REJECTED'):
                # Device still busy: wait a second and try again.
                logging.debug('VBD.unplug rejected: retrying...')
                time.sleep(1)
            elif (len(e.details) > 0 and
                  e.details[0] == 'DEVICE_ALREADY_DETACHED'):
                # A prior retry (or someone else) already unplugged it.
                logging.debug('VBD.unplug successful eventually.')
                return
            else:
                # Best-effort cleanup path: swallow anything else.
                logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e)
                return
def with_http_connection(proto, netloc, f):
    """Open an HTTP(S) connection to netloc, pass it to f, always close.

    Returns f's return value.  (Python 2 `httplib` module.)
    """
    # and/or idiom: exactly one connection object is constructed, HTTPS
    # when proto says so, plain HTTP otherwise.
    conn = (proto == 'https' and
            httplib.HTTPSConnection(netloc) or
            httplib.HTTPConnection(netloc))
    try:
        return f(conn)
    finally:
        conn.close()
def with_file(dest_path, mode, f):
    """Open dest_path with `mode`, invoke f(file), and always close it.

    Returns f's return value.
    """
    handle = open(dest_path, mode)
    try:
        return f(handle)
    finally:
        handle.close()
+13
View File
@@ -1,4 +1,17 @@
[Messages Control]
disable-msg=C0103
# TODOs in code comments are fine...
disable-msg=W0511
# *args and **kwargs are fine
disable-msg=W0142
[Basic]
# Variables can be 1 to 31 characters long, with
# lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$
# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=[a-z_][a-z0-9_]{2,50}$
[Design]
+6 -4
View File
@@ -38,11 +38,11 @@ Due to our use of multiprocessing it we frequently get some ignorable
'Interrupted system call' exceptions after test completion.
"""
import __main__
import os
import sys
from twisted.scripts import trial as trial_script
from nova import datastore
@@ -54,21 +54,23 @@ from nova.tests.auth_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.compute_unittest import *
from nova.tests.flags_unittest import *
from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
from nova.tests.rpc_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.volume_unittest import *
FLAGS = flags.FLAGS
flags.DEFINE_bool('flush_db', True,
'Flush the database before running fake tests')
flags.DEFINE_string('tests_stderr', 'run_tests.err.log',
'Path to where to pipe STDERR during test runs. Default = "run_tests.err.log"')
'Path to where to pipe STDERR during test runs.'
' Default = "run_tests.err.log"')
if __name__ == '__main__':
OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
+59 -6
View File
@@ -1,13 +1,66 @@
#!/bin/bash
#!/bin/bash
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Nova's test suite(s)"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -h, --help Print this usage message"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
exit
}
function process_options {
array=$1
elements=${#array[@]}
for (( x=0;x<$elements;x++)); do
process_option ${array[${x}]}
done
}
# Handle a single command-line option.
# -h/--help prints usage and exits; -V forces virtualenv use;
# -N forces running in the local environment.  Unknown options are
# silently ignored, as before.
function process_option {
  case "$1" in
    -h|--help)
      usage
      ;;
    -V|--virtual-env)
      always_venv=1
      never_venv=0
      ;;
    -N|--no-virtual-env)
      always_venv=0
      never_venv=1
      ;;
  esac
}
venv=.nova-venv
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
options=("$@")
process_options $options
if [ $never_venv -eq 1 ]; then
# Just run the test suites in current environment
python run_tests.py
exit
fi
if [ -e ${venv} ]; then
${with_venv} python run_tests.py
${with_venv} python run_tests.py $@
else
echo "You need to install the Nova virtualenv before you can run this."
echo ""
echo "Please run tools/install_venv.py"
exit 1
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py
else
python run_tests.py
exit
fi
fi
${with_venv} python run_tests.py $@
fi
+57 -22
View File
@@ -1,3 +1,23 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Installation script for Nova's development virtualenv
"""
@@ -7,64 +27,79 @@ import subprocess
import sys
ROOT = os.path.dirname(os.path.dirname(__file__))
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.nova-venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz'
def die(message, *args):
    """Write the %-formatted message to stderr and abort with exit status 1."""
    sys.stderr.write((message % args) + "\n")
    sys.exit(1)
def run_command(cmd, redirect_output=True, check_exit_code=True):
# Useful for debugging:
#print >>sys.stderr, ' '.join(cmd)
def run_command(cmd, redirect_output=True, check_exit_code=False):
"""
Runs a command in an out-of-process shell, returning the
output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, stdout=stdout)
proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return output
def check_dependencies():
"""Make sure pip and virtualenv are on the path."""
# Perl also has a pip program. Hopefully the user has installed the right one!
print 'Checking for pip...',
if not run_command(['which', 'pip'], check_exit_code=False).strip():
die('ERROR: pip not found.\n\nNova development requires pip,'
' please install it using your favorite package management tool '
' (e.g. "sudo apt-get install python-pip")')
print 'done.'
HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], check_exit_code=False).strip())
HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], check_exit_code=False).strip())
print 'Checking for virtualenv...',
if not run_command(['which', 'virtualenv'], check_exit_code=False).strip():
die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
' please install it using your favorite package management tool '
' (e.g. "sudo easy_install virtualenv")')
def check_dependencies():
"""Make sure virtualenv is in the path."""
if not HAS_VIRTUALENV:
print 'not found.'
# Try installing it via easy_install...
if HAS_EASY_INSTALL:
print 'Installing virtualenv via easy_install...',
if not run_command(['which', 'easy_install']):
die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
' please install it using your favorite package management tool')
print 'done.'
print 'done.'
def create_virtualenv(venv=VENV):
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
print 'done.'
print 'Installing pip in virtualenv...',
if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip():
die("Failed to install pip.")
print 'done.'
def install_dependencies(venv=VENV):
print 'Installing dependencies with pip (this can take a while)...'
run_command(['pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
redirect_output=False)
run_command(['pip', 'install', '-E', venv, TWISTED_NOVA],
run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, TWISTED_NOVA],
redirect_output=False)
# Tell the virtual env how to "import nova"
pathfile=os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth")
f=open(pathfile, 'w')
f.write("%s\n" % ROOT)
def print_help():
help = """
Nova development environment setup is complete.
+5
View File
@@ -1,14 +1,19 @@
pep8==0.5.0
pylint==0.19
IPy==0.70
M2Crypto==0.20.2
amqplib==0.6.1
anyjson==0.2.4
boto==2.0b1
carrot==0.10.5
eventlet==0.9.10
lockfile==0.8
python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0
routes==1.12.3
tornado==1.0
webob==0.9.8
wsgiref==0.1.2
zope.interface==3.6.1
mox==0.5.0