From 4044051266d97ffe05fbe75b642759d2e604da4d Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Mon, 26 Jul 2010 23:19:51 -0400 Subject: [PATCH 001/101] Share my updates to the Rackspace API. --- bin/nova-rsapi | 13 +--- exercise_rsapi.py | 51 ------------- nova/endpoint/rackspace.py | 147 +++++++++++++++++++++++++++---------- 3 files changed, 109 insertions(+), 102 deletions(-) delete mode 100644 exercise_rsapi.py diff --git a/bin/nova-rsapi b/bin/nova-rsapi index c2f2c9d703..a529fc6697 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -37,23 +37,12 @@ FLAGS = flags.FLAGS flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') def main(_argv): - user_manager = users.UserManager() - api_instance = rackspace.Api(user_manager) - conn = rpc.Connection.instance() - rpc_consumer = rpc.AdapterConsumer(connection=conn, - topic=FLAGS.cloud_topic, - proxy=api_instance) - -# TODO: fire rpc response listener (without attach to tornado) -# io_inst = ioloop.IOLoop.instance() -# _injected = consumer.attach_to_tornado(io_inst) - + api_instance = rackspace.Api() http_server = simple_server.WSGIServer(('0.0.0.0', FLAGS.cc_port), simple_server.WSGIRequestHandler) http_server.set_app(api_instance.handler) logging.debug('Started HTTP server on port %i' % FLAGS.cc_port) while True: http_server.handle_request() -# io_inst.start() if __name__ == '__main__': utils.default_flagfile() diff --git a/exercise_rsapi.py b/exercise_rsapi.py deleted file mode 100644 index 20589b9cb1..0000000000 --- a/exercise_rsapi.py +++ /dev/null @@ -1,51 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import cloudservers - -class IdFake: - def __init__(self, id): - self.id = id - -# to get your access key: -# from nova.auth import users -# users.UserManger.instance().get_users()[0].access -rscloud = cloudservers.CloudServers( - 'admin', - '6cca875e-5ab3-4c60-9852-abf5c5c60cc6' - ) -rscloud.client.AUTH_URL = 'http://localhost:8773/v1.0' - - -rv = rscloud.servers.list() -print "SERVERS: %s" % rv - -if len(rv) == 0: - server = rscloud.servers.create( - "test-server", - IdFake("ami-tiny"), - IdFake("m1.tiny") - ) - print "LAUNCH: %s" % server -else: - server = rv[0] - print "Server to kill: %s" % server - -raw_input("press enter key to kill the server") - -server.delete() diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index 29a077b244..b561212f58 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -48,82 +48,128 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') -# TODO(todd): subclass Exception so we can bubble meaningful errors +class Unauthorized(Exception): + pass + +class NotFound(Exception): + pass class Api(object): - def __init__(self, rpc_mechanism): + def __init__(self): + """build endpoints here""" self.controllers = { "v1.0": RackspaceAuthenticationApi(), "servers": RackspaceCloudServerApi() } - self.rpc_mechanism = rpc_mechanism def handler(self, environ, responder): + """ + This is the entrypoint from wsgi. Read PEP 333 and wsgi.org for + more intormation. 
The key points are responder is a callback that + needs to run before you return, and takes two arguments, response + code string ("200 OK") and headers (["X-How-Cool-Am-I: Ultra-Suede"]) + and the return value is the body of the response. + """ environ['nova.context'] = self.build_context(environ) controller, path = wsgi.Util.route( environ['PATH_INFO'], self.controllers ) + logging.debug("Route %s to %s", str(path), str(controller)) if not controller: - # TODO(todd): Exception (404) - raise Exception("Missing Controller") - rv = controller.process(path, environ) - if type(rv) is tuple: - responder(rv[0], rv[1]) - rv = rv[2] - else: - responder("200 OK", []) - return rv + responder("404 Not Found", []) + return "" + try: + rv = controller.process(path, environ) + if type(rv) is tuple: + responder(rv[0], rv[1]) + rv = rv[2] + else: + responder("200 OK", []) + return rv + except Unauthorized: + responder("401 Unauthorized", []) + return "" + except NotFound: + responder("404 Not Found", []) + return "" + def build_context(self, env): rv = {} if env.has_key("HTTP_X_AUTH_TOKEN"): + # TODO(todd): once we make an actual unique token, this will change rv['user'] = users.UserManager.instance().get_user_from_access_key( - env['HTTP_X_AUTH_TOKEN'] - ) + env['HTTP_X_AUTH_TOKEN']) if rv['user']: rv['project'] = users.UserManager.instance().get_project( - rv['user'].name - ) + rv['user'].name) return rv class RackspaceApiEndpoint(object): def process(self, path, env): + """ + Main entrypoint for all controllers (what gets run by the wsgi handler). + Check authentication based on key, raise Unauthorized if invalid. + + Select the most appropriate action based on request type GET, POST, etc, + then pass it through to the implementing controller. Defalut to GET if + the implementing child doesn't respond to a particular type. 
+ """ if not self.check_authentication(env): - # TODO(todd): Exception (Unauthorized) - raise Exception("Unable to authenticate") + raise Unauthorized("Unable to authenticate") - if len(path) == 0: + method = env['REQUEST_METHOD'].lower() + callback = getattr(self, method, None) + if not callback: + callback = getattr(self, "get") + logging.debug("%s processing %s with %s", self, method, callback) + return callback(path, env) + + def get(self, path, env): + """ + The default GET will look at the path and call an appropriate + action within this controller based on the the structure of the path. + + Given the following path lengths (with the first part stripped of by + router, as it is the controller name): + = 0 -> index + = 1 -> first component (/servers/details -> details) + >= 2 -> second path component (/servers/ID/ips/* -> ips) + + This should return + A String if 200 OK and no additional headers + (CODE, HEADERS, BODY) for custom response code and headers + """ + if len(path) == 0 and hasattr(self, "index"): + logging.debug("%s running index", self) return self.index(env) + if len(path) >= 2: + action = path[1] + else: + action = path.pop(0) - action = path.pop(0) + logging.debug("%s running action %s", self, action) if hasattr(self, action): method = getattr(self, action) return method(path, env) else: - # TODO(todd): Exception (404) - raise Exception("Missing method %s" % path[0]) + raise NotFound("Missing method %s" % path[0]) def check_authentication(self, env): - if hasattr(self, "process_without_authentication") \ - and getattr(self, "process_without_authentication"): - return True if not env['nova.context']['user']: return False return True -class RackspaceAuthenticationApi(RackspaceApiEndpoint): - - def __init__(self): - self.process_without_authentication = True +class RackspaceAuthenticationApi(object): # TODO(todd): make a actual session with a unique token # just pass the auth key back through for now - def index(self, env): + def index(self, 
_path, env): response = '204 No Content' headers = [ ('X-Server-Management-Url', 'http://%s' % env['HTTP_HOST']), @@ -141,20 +187,25 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint): self.instdir = model.InstanceDirectory() self.network = network.PublicNetworkController() + def post(self, path, env): + if len(path) == 0: + return self.launch_server(env) + + def delete(self, path_parts, env): + if self.delete_server(path_parts[0]): + return ("202 Accepted", [], "") + else: + return ("404 Not Found", [], + "Did not find image, or it was not in a running state") + + def index(self, env): - if env['REQUEST_METHOD'] == 'GET': - return self.detail(env) - elif env['REQUEST_METHOD'] == 'POST': - return self.launch_server(env) + return self.detail(env) def detail(self, args, env): - value = { - "servers": - [] - } + value = {"servers": []} for inst in self.instdir.all: value["servers"].append(self.instance_details(inst)) - return json.dumps(value) ## @@ -227,3 +278,21 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint): "args": {"instance_id": inst.instance_id} } ) + + def delete_server(self, instance_id): + owner_hostname = self.host_for_instance(instance_id) + # it isn't launched? 
+ if not owner_hostname: + return None + rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname) + rpc.cast(rpc_transport, + {"method": "reboot_instance", + "args": {"instance_id": instance_id}}) + return True + + def host_for_instance(self, instance_id): + instance = model.Instance.lookup(instance_id) + if not instance: + return None + return instance["node_name"] + From 8dad7d1d45599880571689d62857cb437dea182e Mon Sep 17 00:00:00 2001 From: "Joel Moore joelbm24@gmail.com" <> Date: Tue, 27 Jul 2010 15:35:20 -0700 Subject: [PATCH 002/101] fixed path to keys directory --- nova/endpoint/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 8c6c055668..9d81d3bba4 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -86,7 +86,7 @@ class CloudController(object): """ Ensure the keychains and folders exist. """ # Create keys folder, if it doesn't exist if not os.path.exists(FLAGS.keys_path): - os.makedirs(os.path.abspath(FLAGS.keys_path)) + os.makedirs(FLAGS.keys_path) # Gen root CA, if we don't have one root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) if not os.path.exists(root_ca_path): From 9dfdbe11b89ae1f490257b6f687c2a5f5c90bb14 Mon Sep 17 00:00:00 2001 From: andy Date: Thu, 29 Jul 2010 19:53:00 +0200 Subject: [PATCH 003/101] Add some useful features to our flags * No longer dies if there are unknown flags. * Allows you to declare that you will use a flag from another file * Allows you to import new flags at runtime and reparses the original arguments to fill them once they are accessed. 
--- nova/flags.py | 133 +++++++++++++++++++++++++++++++++-- nova/tests/declare_flags.py | 5 ++ nova/tests/flags_unittest.py | 94 +++++++++++++++++++++++++ nova/tests/runtime_flags.py | 5 ++ run_tests.py | 1 + run_tests.sh | 2 +- 6 files changed, 233 insertions(+), 7 deletions(-) create mode 100644 nova/tests/declare_flags.py create mode 100644 nova/tests/flags_unittest.py create mode 100644 nova/tests/runtime_flags.py diff --git a/nova/flags.py b/nova/flags.py index f35f5fa10c..2ec7d9c9f7 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -21,16 +21,137 @@ Package-level global flags are defined here, the rest are defined where they're used. """ +import getopt import socket +import sys + +import gflags -from gflags import * +class FlagValues(gflags.FlagValues): + def __init__(self): + gflags.FlagValues.__init__(self) + self.__dict__['__dirty'] = [] + self.__dict__['__was_already_parsed'] = False + self.__dict__['__stored_argv'] = [] + + def __call__(self, argv): + # We're doing some hacky stuff here so that we don't have to copy + # out all the code of the original verbatim and then tweak a few lines. 
+ # We're hijacking the output of getopt so we can still return the + # leftover args at the end + sneaky_unparsed_args = {"value": None} + original_argv = list(argv) + + if self.IsGnuGetOpt(): + orig_getopt = getattr(getopt, 'gnu_getopt') + orig_name = 'gnu_getopt' + else: + orig_getopt = getattr(getopt, 'getopt') + orig_name = 'getopt' + + def _sneaky(*args, **kw): + optlist, unparsed_args = orig_getopt(*args, **kw) + sneaky_unparsed_args['value'] = unparsed_args + return optlist, unparsed_args + + try: + setattr(getopt, orig_name, _sneaky) + args = gflags.FlagValues.__call__(self, argv) + except gflags.UnrecognizedFlagError: + # Undefined args were found, for now we don't care so just + # act like everything went well + # (these three lines are copied pretty much verbatim from the end + # of the __call__ function we are wrapping) + unparsed_args = sneaky_unparsed_args['value'] + if unparsed_args: + if self.IsGnuGetOpt(): + args = argv[:1] + unparsed + else: + args = argv[:1] + original_argv[-len(unparsed_args):] + else: + args = argv[:1] + finally: + setattr(getopt, orig_name, orig_getopt) + + # Store the arguments for later, we'll need them for new flags + # added at runtime + self.__dict__['__stored_argv'] = original_argv + self.__dict__['__was_already_parsed'] = True + self.ClearDirty() + return args + + def SetDirty(self, name): + """Mark a flag as dirty so that accessing it will case a reparse.""" + self.__dict__['__dirty'].append(name) + + def IsDirty(self, name): + return name in self.__dict__['__dirty'] + + def ClearDirty(self): + self.__dict__['__is_dirty'] = [] + + def WasAlreadyParsed(self): + return self.__dict__['__was_already_parsed'] + + def ParseNewFlags(self): + if '__stored_argv' not in self.__dict__: + return + new_flags = FlagValues() + for k in self.__dict__['__dirty']: + new_flags[k] = gflags.FlagValues.__getitem__(self, k) + + new_flags(self.__dict__['__stored_argv']) + for k in self.__dict__['__dirty']: + setattr(self, k, 
getattr(new_flags, k)) + self.ClearDirty() + + def __setitem__(self, name, flag): + gflags.FlagValues.__setitem__(self, name, flag) + if self.WasAlreadyParsed(): + self.SetDirty(name) + + def __getitem__(self, name): + if self.IsDirty(name): + self.ParseNewFlags() + return gflags.FlagValues.__getitem__(self, name) + + def __getattr__(self, name): + if self.IsDirty(name): + self.ParseNewFlags() + return gflags.FlagValues.__getattr__(self, name) + + +FLAGS = FlagValues() + + +def party_wrapper(func): + def _wrapped(*args, **kw): + kw.setdefault('flag_values', FLAGS) + func(*args, **kw) + _wrapped.func_name = func.func_name + return _wrapped + + +DEFINE_string = party_wrapper(gflags.DEFINE_string) +DEFINE_integer = party_wrapper(gflags.DEFINE_integer) +DEFINE_bool = party_wrapper(gflags.DEFINE_bool) +DEFINE_boolean = party_wrapper(gflags.DEFINE_boolean) +DEFINE_float = party_wrapper(gflags.DEFINE_float) +DEFINE_enum = party_wrapper(gflags.DEFINE_enum) +DEFINE_list = party_wrapper(gflags.DEFINE_list) +DEFINE_spaceseplist = party_wrapper(gflags.DEFINE_spaceseplist) +DEFINE_multistring = party_wrapper(gflags.DEFINE_multistring) +DEFINE_multi_int = party_wrapper(gflags.DEFINE_multi_int) + + +def DECLARE(name, module_string, flag_values=FLAGS): + if module_string not in sys.modules: + __import__(module_string, globals(), locals()) + if name not in flag_values: + raise gflags.UnrecognizedFlag( + "%s not defined by %s" % (name, module_string)) -# This keeps pylint from barfing on the imports -FLAGS = FLAGS -DEFINE_string = DEFINE_string -DEFINE_integer = DEFINE_integer -DEFINE_bool = DEFINE_bool # __GLOBAL FLAGS ONLY__ # Define any app-specific flags in their own files, docs at: diff --git a/nova/tests/declare_flags.py b/nova/tests/declare_flags.py new file mode 100644 index 0000000000..f7c91f9ddb --- /dev/null +++ b/nova/tests/declare_flags.py @@ -0,0 +1,5 @@ +from nova import flags + +FLAGS = flags.FLAGS + +flags.DEFINE_integer('answer', 42, 'test flag') diff --git 
a/nova/tests/flags_unittest.py b/nova/tests/flags_unittest.py new file mode 100644 index 0000000000..44da94d048 --- /dev/null +++ b/nova/tests/flags_unittest.py @@ -0,0 +1,94 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +from twisted.internet import defer +from twisted.internet import reactor +from xml.etree import ElementTree + +from nova import exception +from nova import flags +from nova import process +from nova import test +from nova import utils + + +class FlagsTestCase(test.TrialTestCase): + def setUp(self): + super(FlagsTestCase, self).setUp() + self.FLAGS = flags.FlagValues() + self.global_FLAGS = flags.FLAGS + + def test_define(self): + self.assert_('string' not in self.FLAGS) + self.assert_('int' not in self.FLAGS) + self.assert_('false' not in self.FLAGS) + self.assert_('true' not in self.FLAGS) + + flags.DEFINE_string('string', 'default', 'desc', flag_values=self.FLAGS) + flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS) + flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS) + flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS) + + self.assert_(self.FLAGS['string']) + self.assert_(self.FLAGS['int']) + self.assert_(self.FLAGS['false']) + self.assert_(self.FLAGS['true']) + 
self.assertEqual(self.FLAGS.string, 'default') + self.assertEqual(self.FLAGS.int, 1) + self.assertEqual(self.FLAGS.false, False) + self.assertEqual(self.FLAGS.true, True) + + argv = ['flags_test', + '--string', 'foo', + '--int', '2', + '--false', + '--notrue'] + + self.FLAGS(argv) + self.assertEqual(self.FLAGS.string, 'foo') + self.assertEqual(self.FLAGS.int, 2) + self.assertEqual(self.FLAGS.false, True) + self.assertEqual(self.FLAGS.true, False) + + def test_declare(self): + self.assert_('answer' not in self.global_FLAGS) + flags.DECLARE('answer', 'nova.tests.declare_flags') + self.assert_('answer' in self.global_FLAGS) + self.assertEqual(self.global_FLAGS.answer, 42) + + # Make sure we don't overwrite anything + self.global_FLAGS.answer = 256 + self.assertEqual(self.global_FLAGS.answer, 256) + flags.DECLARE('answer', 'nova.tests.declare_flags') + self.assertEqual(self.global_FLAGS.answer, 256) + + def test_runtime_and_unknown_flags(self): + self.assert_('runtime_answer' not in self.global_FLAGS) + + argv = ['flags_test', '--runtime_answer=60', 'extra_arg'] + args = self.global_FLAGS(argv) + self.assertEqual(len(args), 2) + self.assertEqual(args[1], 'extra_arg') + + self.assert_('runtime_answer' not in self.global_FLAGS) + + import nova.tests.runtime_flags + + self.assert_('runtime_answer' in self.global_FLAGS) + self.assertEqual(self.global_FLAGS.runtime_answer, 60) diff --git a/nova/tests/runtime_flags.py b/nova/tests/runtime_flags.py new file mode 100644 index 0000000000..a2cc4738a9 --- /dev/null +++ b/nova/tests/runtime_flags.py @@ -0,0 +1,5 @@ +from nova import flags + +FLAGS = flags.FLAGS + +flags.DEFINE_integer('runtime_answer', 54, 'test flag') diff --git a/run_tests.py b/run_tests.py index 5a8966f026..14019b659b 100644 --- a/run_tests.py +++ b/run_tests.py @@ -54,6 +54,7 @@ from nova.tests.auth_unittest import * from nova.tests.api_unittest import * from nova.tests.cloud_unittest import * from nova.tests.compute_unittest import * +from 
nova.tests.flags_unittest import * from nova.tests.model_unittest import * from nova.tests.network_unittest import * from nova.tests.objectstore_unittest import * diff --git a/run_tests.sh b/run_tests.sh index 1bf3d1a791..9b2de7aea6 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -4,7 +4,7 @@ venv=.nova-venv with_venv=tools/with_venv.sh if [ -e ${venv} ]; then - ${with_venv} python run_tests.py + ${with_venv} python run_tests.py $@ else echo "You need to install the Nova virtualenv before you can run this." echo "" From fe64d63240ee05e972731dbd97f76fcac3e1c5aa Mon Sep 17 00:00:00 2001 From: andy Date: Thu, 29 Jul 2010 20:05:22 +0200 Subject: [PATCH 004/101] strip out some useless imports --- nova/tests/flags_unittest.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/nova/tests/flags_unittest.py b/nova/tests/flags_unittest.py index 44da94d048..d49d5dc43a 100644 --- a/nova/tests/flags_unittest.py +++ b/nova/tests/flags_unittest.py @@ -16,16 +16,9 @@ # License for the specific language governing permissions and limitations # under the License. -import logging -from twisted.internet import defer -from twisted.internet import reactor -from xml.etree import ElementTree - from nova import exception from nova import flags -from nova import process from nova import test -from nova import utils class FlagsTestCase(test.TrialTestCase): From 40b2bbcfe6274aca9fd4361c56b2b042ba22e3c2 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Mon, 2 Aug 2010 08:31:19 +0100 Subject: [PATCH 005/101] Turn the private _image_url(path) into a public image_url(image). This will be used by virt.xenapi to instruct xapi as to which images to download. As part of this, the value returned became a complete URL, with http:// on the front. This caused the URL parsing to be adjusted. 
--- nova/virt/images.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/virt/images.py b/nova/virt/images.py index 92210e242d..698536324f 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -23,6 +23,7 @@ Handling of VM disk images. import os.path import time +import urlparse from nova import flags from nova import process @@ -42,7 +43,7 @@ def fetch(image, path, user): return f(image, path, user) def _fetch_s3_image(image, path, user): - url = _image_url('%s/image' % image) + url = image_url(image) # This should probably move somewhere else, like e.g. a download_as # method on User objects and at the same time get rewritten to use @@ -50,8 +51,8 @@ def _fetch_s3_image(image, path, user): headers = {} headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - uri = '/' + url.partition('/')[2] - auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', uri) + (_, _, url_path, _, _, _) = urlparse.urlparse(url) + auth = signer.Signer(user.secret.encode()).s3_authorization(headers, 'GET', url_path) headers['Authorization'] = 'AWS %s:%s' % (user.access, auth) cmd = ['/usr/bin/curl', '--silent', url] @@ -68,5 +69,6 @@ def _fetch_local_image(image, path, _): def _image_path(path): return os.path.join(FLAGS.images_path, path) -def _image_url(path): - return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) +def image_url(image): + return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, + image) From 4c8ae5e0a5b30039075a87ba39aec6da64fdd138 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 00:52:06 +0100 Subject: [PATCH 006/101] Added a xapi plugin that can pull images from nova-objectstore, and use that to get a disk, kernel, and ramdisk for the VM. The VM actually boots! 
--- nova/virt/xenapi.py | 105 ++++++++- xenapi/README | 2 + xenapi/etc/xapi.d/plugins/objectstore | 231 ++++++++++++++++++++ xenapi/etc/xapi.d/plugins/pluginlib_nova.py | 216 ++++++++++++++++++ 4 files changed, 547 insertions(+), 7 deletions(-) create mode 100644 xenapi/README create mode 100644 xenapi/etc/xapi.d/plugins/objectstore create mode 100755 xenapi/etc/xapi.d/plugins/pluginlib_nova.py diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index dc372e3e33..b84e551385 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -19,6 +19,7 @@ A connection to XenServer or Xen Cloud Platform. """ import logging +import xmlrpclib from twisted.internet import defer from twisted.internet import task @@ -26,7 +27,9 @@ from twisted.internet import task from nova import exception from nova import flags from nova import process +from nova.auth.manager import AuthManager from nova.compute import power_state +from nova.virt import images XenAPI = None @@ -71,10 +74,26 @@ class XenAPIConnection(object): @defer.inlineCallbacks @exception.wrap_exception def spawn(self, instance): - vm = self.lookup(instance.name) + vm = yield self.lookup(instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) + + user = AuthManager().get_user(instance.datamodel['user_id']) + vdi_uuid = yield self.fetch_image( + instance.datamodel['image_id'], user, True) + kernel = yield self.fetch_image( + instance.datamodel['kernel_id'], user, False) + ramdisk = yield self.fetch_image( + instance.datamodel['ramdisk_id'], user, False) + vdi_ref = yield self._conn.xenapi.VDI.get_by_uuid(vdi_uuid) + + vm_ref = yield self.create_vm(instance, kernel, ramdisk) + yield self.create_vbd(vm_ref, vdi_ref, 0, True) + yield self._conn.xenapi.VM.start(vm_ref, False, False) + + + def create_vm(self, instance, kernel, ramdisk): mem = str(long(instance.datamodel['memory_kb']) * 1024) vcpus = str(instance.datamodel['vcpus']) rec = { @@ -92,9 +111,9 @@ class 
XenAPIConnection(object): 'actions_after_reboot': 'restart', 'actions_after_crash': 'destroy', 'PV_bootloader': '', - 'PV_kernel': instance.datamodel['kernel_id'], - 'PV_ramdisk': instance.datamodel['ramdisk_id'], - 'PV_args': '', + 'PV_kernel': kernel, + 'PV_ramdisk': ramdisk, + 'PV_args': 'root=/dev/xvda1', 'PV_bootloader_args': '', 'PV_legacy_args': '', 'HVM_boot_policy': '', @@ -106,8 +125,48 @@ class XenAPIConnection(object): 'user_version': '0', 'other_config': {}, } - vm = yield self._conn.xenapi.VM.create(rec) - #yield self._conn.xenapi.VM.start(vm, False, False) + logging.debug('Created VM %s...', instance.name) + vm_ref = self._conn.xenapi.VM.create(rec) + logging.debug('Created VM %s as %s.', instance.name, vm_ref) + return vm_ref + + + def create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + vbd_rec = {} + vbd_rec['VM'] = vm_ref + vbd_rec['VDI'] = vdi_ref + vbd_rec['userdevice'] = str(userdevice) + vbd_rec['bootable'] = bootable + vbd_rec['mode'] = 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) + vbd_ref = self._conn.xenapi.VBD.create(vbd_rec) + logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, + vdi_ref) + return vbd_ref + + + def fetch_image(self, image, user, use_sr): + """use_sr: True to put the image as a VDI in an SR, False to place + it on dom0's filesystem. 
The former is for VM disks, the latter for + its kernel and ramdisk (if external kernels are being used).""" + + url = images.image_url(image) + logging.debug("Asking xapi to fetch %s as %s" % (url, user.access)) + fn = use_sr and 'get_vdi' or 'get_kernel' + args = {} + args['src_url'] = url + args['username'] = user.access + args['password'] = user.secret + if use_sr: + args['add_partition'] = 'true' + return self._call_plugin('objectstore', fn, args) def reboot(self, instance): @@ -143,10 +202,42 @@ class XenAPIConnection(object): else: return vms[0] + + def _call_plugin(self, plugin, fn, args): + return _unwrap_plugin_exceptions( + self._conn.xenapi.host.call_plugin, + self._get_xenapi_host(), plugin, fn, args) + + + def _get_xenapi_host(self): + return self._conn.xenapi.session.get_this_host(self._conn.handle) + + power_state_from_xenapi = { - 'Halted' : power_state.RUNNING, #FIXME + 'Halted' : power_state.SHUTDOWN, 'Running' : power_state.RUNNING, 'Paused' : power_state.PAUSED, 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed' : power_state.CRASHED } + + +def _unwrap_plugin_exceptions(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, exn: + logging.debug("Got exception: %s", exn) + if (len(exn.details) == 4 and + exn.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and + exn.details[2] == 'Failure'): + params = None + try: + params = eval(exn.details[3]) + except: + raise exn + raise XenAPI.Failure(params) + else: + raise + except xmlrpclib.ProtocolError, exn: + logging.debug("Got exception: %s", exn) + raise diff --git a/xenapi/README b/xenapi/README new file mode 100644 index 0000000000..1fc67aa7a9 --- /dev/null +++ b/xenapi/README @@ -0,0 +1,2 @@ +This directory contains files that are required for the XenAPI support. They +should be installed in the XenServer / Xen Cloud Platform domain 0. 
diff --git a/xenapi/etc/xapi.d/plugins/objectstore b/xenapi/etc/xapi.d/plugins/objectstore new file mode 100644 index 0000000000..271e7337f8 --- /dev/null +++ b/xenapi/etc/xapi.d/plugins/objectstore @@ -0,0 +1,231 @@ +#!/usr/bin/env python + +# Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# XenAPI plugin for fetching images from nova-objectstore. 
+# + +import base64 +import errno +import hmac +import os +import os.path +import sha +import time +import urlparse + +import XenAPIPlugin + +from pluginlib_nova import * +configure_logging('objectstore') + + +KERNEL_DIR = '/boot/guest' + +DOWNLOAD_CHUNK_SIZE = 2 * 1024 * 1024 +SECTOR_SIZE = 512 +MBR_SIZE_SECTORS = 63 +MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE + + +def get_vdi(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + add_partition = validate_bool(args, 'add_partition', 'false') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + sr = find_sr(session) + if sr is None: + raise Exception('Cannot find SR to write VDI to') + + virtual_size = \ + get_content_length(proto, netloc, url_path, username, password) + if virtual_size < 0: + raise Exception('Cannot get VDI size') + + vdi_size = virtual_size + if add_partition: + # Make room for MBR. + vdi_size += MBR_SIZE_BYTES + + vdi = create_vdi(session, sr, src_url, vdi_size, False) + with_vdi_in_dom0(session, vdi, False, + lambda dev: get_vdi_(proto, netloc, url_path, + username, password, add_partition, + virtual_size, '/dev/%s' % dev)) + return session.xenapi.VDI.get_uuid(vdi) + + +def get_vdi_(proto, netloc, url_path, username, password, add_partition, + virtual_size, dest): + + if add_partition: + write_partition(virtual_size, dest) + + offset = add_partition and MBR_SIZE_BYTES or 0 + get(proto, netloc, url_path, username, password, dest, offset) + + +def write_partition(virtual_size, dest): + mbr_last = MBR_SIZE_SECTORS - 1 + primary_first = MBR_SIZE_SECTORS + primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 + + logging.debug('Writing partition table %d %d to %s...', + primary_first, primary_last, dest) + + result = os.system('parted --script %s mklabel msdos' % dest) + if result != 0: + raise Exception('Failed to mklabel') + result = os.system('parted --script %s mkpart primary %ds %ds' 
% + (dest, primary_first, primary_last)) + if result != 0: + raise Exception('Failed to mkpart') + + logging.debug('Writing partition table %s done.', dest) + + +def find_sr(session): + host = get_this_host(session) + srs = session.xenapi.SR.get_all() + for sr in srs: + sr_rec = session.xenapi.SR.get_record(sr) + if not ('i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage'): + continue + for pbd in sr_rec['PBDs']: + pbd_rec = session.xenapi.PBD.get_record(pbd) + if pbd_rec['host'] == host: + return sr + return None + + +def get_kernel(session, args): + src_url = exists(args, 'src_url') + username = exists(args, 'username') + password = exists(args, 'password') + + (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) + + dest = os.path.join(KERNEL_DIR, url_path[1:]) + + # Paranoid check against people using ../ to do rude things. + if os.path.commonprefix([KERNEL_DIR, dest]) != KERNEL_DIR: + raise Exception('Illegal destination %s %s', (url_path, dest)) + + dirname = os.path.dirname(dest) + try: + os.makedirs(dirname) + except os.error, e: + if e.errno != errno.EEXIST: + raise + if not os.path.isdir(dirname): + raise Exception('Cannot make directory %s', dirname) + + try: + os.remove(dest) + except: + pass + + get(proto, netloc, url_path, username, password, dest, 0) + + return dest + + +def get_content_length(proto, netloc, url_path, username, password): + headers = make_headers('HEAD', url_path, username, password) + return with_http_connection( + proto, netloc, + lambda conn: get_content_length_(url_path, headers, conn)) + + +def get_content_length_(url_path, headers, conn): + conn.request('HEAD', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + return long(response.getheader('Content-Length', -1)) + + +def get(proto, netloc, url_path, username, password, dest, offset): + headers = make_headers('GET', 
url_path, username, password) + download(proto, netloc, url_path, headers, dest, offset) + + +def make_headers(verb, url_path, username, password): + headers = {} + headers['Date'] = \ + time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) + headers['Authorization'] = \ + 'AWS %s:%s' % (username, + s3_authorization(verb, url_path, password, headers)) + return headers + + +def s3_authorization(verb, path, password, headers): + sha1 = hmac.new(password, digestmod=sha) + sha1.update(plaintext(verb, path, headers)) + return base64.encodestring(sha1.digest()).strip() + + +def plaintext(verb, path, headers): + return '%s\n\n\n%s\n%s' % (verb, + "\n".join([headers[h] for h in headers]), + path) + + +def download(proto, netloc, url_path, headers, dest, offset): + with_http_connection( + proto, netloc, + lambda conn: download_(url_path, dest, offset, headers, conn)) + + +def download_(url_path, dest, offset, headers, conn): + conn.request('GET', url_path, None, headers) + response = conn.getresponse() + if response.status != 200: + raise Exception('%d %s' % (response.status, response.reason)) + + length = response.getheader('Content-Length', -1) + + with_file( + dest, 'a', + lambda dest_file: download_all(response, length, dest_file, offset)) + + +def download_all(response, length, dest_file, offset): + dest_file.seek(offset) + i = 0 + while True: + buf = response.read(DOWNLOAD_CHUNK_SIZE) + if buf: + dest_file.write(buf) + else: + return + i += len(buf) + if length != -1 and i >= length: + return + + +if __name__ == '__main__': + XenAPIPlugin.dispatch({'get_vdi': get_vdi, + 'get_kernel': get_kernel}) diff --git a/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/xenapi/etc/xapi.d/plugins/pluginlib_nova.py new file mode 100755 index 0000000000..2d323a0167 --- /dev/null +++ b/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -0,0 +1,216 @@ +# Copyright (c) 2010 Citrix Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# Helper functions for the Nova xapi plugins. In time, this will merge +# with the pluginlib.py shipped with xapi, but for now, that file is not +# very stable, so it's easiest just to have a copy of all the functions +# that we need. +# + +import httplib +import logging +import logging.handlers +import re +import time + + +##### Logging setup + +def configure_logging(name): + log = logging.getLogger() + log.setLevel(logging.DEBUG) + sysh = logging.handlers.SysLogHandler('/dev/log') + sysh.setLevel(logging.DEBUG) + formatter = logging.Formatter('%s: %%(levelname)-8s %%(message)s' % name) + sysh.setFormatter(formatter) + log.addHandler(sysh) + + +##### Exceptions + +class PluginError(Exception): + """Base Exception class for all plugin errors.""" + def __init__(self, *args): + Exception.__init__(self, *args) + +class ArgumentError(PluginError): + """Raised when required arguments are missing, argument values are invalid, + or incompatible arguments are given. 
+ """ + def __init__(self, *args): + PluginError.__init__(self, *args) + + +##### Helpers + +def ignore_failure(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, e: + logging.error('Ignoring XenAPI.Failure %s', e) + return None + + +##### Argument validation + +ARGUMENT_PATTERN = re.compile(r'^[a-zA-Z0-9_:\.\-,]+$') + +def validate_exists(args, key, default=None): + """Validates that a string argument to a RPC method call is given, and + matches the shell-safe regex, with an optional default value in case it + does not exist. + + Returns the string. + """ + if key in args: + if len(args[key]) == 0: + raise ArgumentError('Argument %r value %r is too short.' % (key, args[key])) + if not ARGUMENT_PATTERN.match(args[key]): + raise ArgumentError('Argument %r value %r contains invalid characters.' % (key, args[key])) + if args[key][0] == '-': + raise ArgumentError('Argument %r value %r starts with a hyphen.' % (key, args[key])) + return args[key] + elif default is not None: + return default + else: + raise ArgumentError('Argument %s is required.' % key) + +def validate_bool(args, key, default=None): + """Validates that a string argument to a RPC method call is a boolean string, + with an optional default value in case it does not exist. + + Returns the python boolean value. + """ + value = validate_exists(args, key, default) + if value.lower() == 'true': + return True + elif value.lower() == 'false': + return False + else: + raise ArgumentError("Argument %s may not take value %r. Valid values are ['true', 'false']." % (key, value)) + +def exists(args, key): + """Validates that a freeform string argument to a RPC method call is given. + Returns the string. + """ + if key in args: + return args[key] + else: + raise ArgumentError('Argument %s is required.' 
% key) + +def optional(args, key): + """If the given key is in args, return the corresponding value, otherwise + return None""" + return key in args and args[key] or None + + +def get_this_host(session): + return session.xenapi.session.get_this_host(session.handle) + + +def get_domain_0(session): + this_host_ref = get_this_host(session) + expr = 'field "is_control_domain" = "true" and field "resident_on" = "%s"' % this_host_ref + return session.xenapi.VM.get_all_records_where(expr).keys()[0] + + +def create_vdi(session, sr_ref, name_label, virtual_size, read_only): + vdi_ref = session.xenapi.VDI.create( + { 'name_label': name_label, + 'name_description': '', + 'SR': sr_ref, + 'virtual_size': str(virtual_size), + 'type': 'User', + 'sharable': False, + 'read_only': read_only, + 'xenstore_data': {}, + 'other_config': {}, + 'sm_config': {}, + 'tags': [] }) + logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label, + virtual_size, read_only, sr_ref) + return vdi_ref + + +def with_vdi_in_dom0(session, vdi, read_only, f): + dom0 = get_domain_0(session) + vbd_rec = {} + vbd_rec['VM'] = dom0 + vbd_rec['VDI'] = vdi + vbd_rec['userdevice'] = 'autodetect' + vbd_rec['bootable'] = False + vbd_rec['mode'] = read_only and 'RO' or 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug('Creating VBD for VDI %s ... ', vdi) + vbd = session.xenapi.VBD.create(vbd_rec) + logging.debug('Creating VBD for VDI %s done.', vdi) + try: + logging.debug('Plugging VBD %s ... ', vbd) + session.xenapi.VBD.plug(vbd) + logging.debug('Plugging VBD %s done.', vbd) + return f(session.xenapi.VBD.get_device(vbd)) + finally: + logging.debug('Destroying VBD for VDI %s ... 
', vdi) + vbd_unplug_with_retry(session, vbd) + ignore_failure(session.xenapi.VBD.destroy, vbd) + logging.debug('Destroying VBD for VDI %s done.', vdi) + + +def vbd_unplug_with_retry(session, vbd): + """Call VBD.unplug on the given VBD, with a retry if we get + DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're + seeing the device still in use, even when all processes using the device + should be dead.""" + while True: + try: + session.xenapi.VBD.unplug(vbd) + logging.debug('VBD.unplug successful first time.') + return + except XenAPI.Failure, e: + if (len(e.details) > 0 and + e.details[0] == 'DEVICE_DETACH_REJECTED'): + logging.debug('VBD.unplug rejected: retrying...') + time.sleep(1) + elif (len(e.details) > 0 and + e.details[0] == 'DEVICE_ALREADY_DETACHED'): + logging.debug('VBD.unplug successful eventually.') + return + else: + logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e) + return + + +def with_http_connection(proto, netloc, f): + conn = (proto == 'https' and + httplib.HTTPSConnection(netloc) or + httplib.HTTPConnection(netloc)) + try: + return f(conn) + finally: + conn.close() + + +def with_file(dest_path, mode, f): + dest = open(dest_path, mode) + try: + return f(dest) + finally: + dest.close() From b31d4f795dbd94bae2c3d8f01aea3b15ed9684b2 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 15:37:31 +0100 Subject: [PATCH 007/101] Define __contains__ on BasicModel, so that we can use "x in datamodel". 
--- nova/datastore.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/datastore.py b/nova/datastore.py index 9c25923347..f6c11d2c99 100644 --- a/nova/datastore.py +++ b/nova/datastore.py @@ -168,6 +168,9 @@ class BasicModel(object): def setdefault(self, item, default): return self.state.setdefault(item, default) + def __contains__(self, item): + return item in self.state + def __getitem__(self, item): return self.state[item] From 89e057cf2f008ebb7ec1c99605ff99f5849d9b40 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 15:41:35 +0100 Subject: [PATCH 008/101] Implement VIF creation. --- nova/virt/xenapi.py | 57 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 50 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index b84e551385..b4768cffa8 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -79,6 +79,18 @@ class XenAPIConnection(object): raise Exception('Attempted to create non-unique name %s' % instance.name) + if 'bridge_name' in instance.datamodel: + network_ref = \ + yield self._find_network_with_bridge( + instance.datamodel['bridge_name']) + else: + network_ref = None + + if 'mac_address' in instance.datamodel: + mac_address = instance.datamodel['mac_address'] + else: + mac_address = '' + user = AuthManager().get_user(instance.datamodel['user_id']) vdi_uuid = yield self.fetch_image( instance.datamodel['image_id'], user, True) @@ -90,6 +102,8 @@ class XenAPIConnection(object): vm_ref = yield self.create_vm(instance, kernel, ramdisk) yield self.create_vbd(vm_ref, vdi_ref, 0, True) + if network_ref: + yield self._create_vif(vm_ref, network_ref, mac_address) yield self._conn.xenapi.VM.start(vm_ref, False, False) @@ -152,6 +166,35 @@ class XenAPIConnection(object): return vbd_ref + def _create_vif(self, vm_ref, network_ref, mac_address): + vif_rec = {} + vif_rec['device'] = '0' + vif_rec['network']= network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = mac_address + vif_rec['MTU'] = 
'1500' + vif_rec['other_config'] = {} + vif_rec['qos_algorithm_type'] = '' + vif_rec['qos_algorithm_params'] = {} + logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, + network_ref) + vif_ref = self._conn.xenapi.VIF.create(vif_rec) + logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, + vm_ref, network_ref) + return vif_ref + + + def _find_network_with_bridge(self, bridge): + expr = 'field "bridge" = "%s"' % bridge + networks = self._conn.xenapi.network.get_all_records_where(expr) + if len(networks) == 1: + return networks.keys()[0] + elif len(networks) > 1: + raise Exception('Found non-unique network for bridge %s' % bridge) + else: + raise Exception('Found no network for bridge %s' % bridge) + + def fetch_image(self, image, user, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for @@ -213,13 +256,13 @@ class XenAPIConnection(object): return self._conn.xenapi.session.get_this_host(self._conn.handle) - power_state_from_xenapi = { - 'Halted' : power_state.SHUTDOWN, - 'Running' : power_state.RUNNING, - 'Paused' : power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME - 'Crashed' : power_state.CRASHED - } +power_state_from_xenapi = { + 'Halted' : power_state.SHUTDOWN, + 'Running' : power_state.RUNNING, + 'Paused' : power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed' : power_state.CRASHED +} def _unwrap_plugin_exceptions(func, *args, **kwargs): From 035f93aa7dc19656bf22de9b7ccfe12b28cde61b Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 3 Aug 2010 15:42:17 +0100 Subject: [PATCH 009/101] Fix exception in get_info. 
--- nova/virt/xenapi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index b4768cffa8..c3e84c2b97 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -227,7 +227,7 @@ class XenAPIConnection(object): def get_info(self, instance_id): vm = self.lookup(instance_id) if vm is None: - raise Exception('instance not present %s' % instance.name) + raise Exception('instance not present %s' % instance_id) rec = self._conn.xenapi.VM.get_record(vm) return {'state': power_state_from_xenapi[rec['power_state']], 'max_mem': long(rec['memory_static_max']) >> 10, From e95aac3ac93dabd35eb86951fdc270e06d2b2622 Mon Sep 17 00:00:00 2001 From: andy Date: Tue, 3 Aug 2010 16:51:37 +0200 Subject: [PATCH 010/101] add copyright headers --- nova/tests/declare_flags.py | 18 ++++++++++++++++++ nova/tests/runtime_flags.py | 18 ++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/nova/tests/declare_flags.py b/nova/tests/declare_flags.py index f7c91f9ddb..51a55ec722 100644 --- a/nova/tests/declare_flags.py +++ b/nova/tests/declare_flags.py @@ -1,3 +1,21 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ from nova import flags FLAGS = flags.FLAGS diff --git a/nova/tests/runtime_flags.py b/nova/tests/runtime_flags.py index a2cc4738a9..1eb5014065 100644 --- a/nova/tests/runtime_flags.py +++ b/nova/tests/runtime_flags.py @@ -1,3 +1,21 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + from nova import flags FLAGS = flags.FLAGS From 6dde6fdc10bdb6f75fabce1b0e7a6c4e031937ea Mon Sep 17 00:00:00 2001 From: andy Date: Tue, 3 Aug 2010 18:00:11 +0200 Subject: [PATCH 011/101] updated doc string and wrapper --- nova/flags.py | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 2ec7d9c9f7..b3bdd088f4 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -29,6 +29,14 @@ import gflags class FlagValues(gflags.FlagValues): + """Extension of gflags.FlagValues that allows undefined and runtime flags. + + Unknown flags will be ignored when parsing the command line, but the + command line will be kept so that it can be replayed if new flags are + defined after the initial parsing. 
+ + """ + def __init__(self): gflags.FlagValues.__init__(self) self.__dict__['__dirty'] = [] @@ -125,7 +133,7 @@ class FlagValues(gflags.FlagValues): FLAGS = FlagValues() -def party_wrapper(func): +def _wrapper(func): def _wrapped(*args, **kw): kw.setdefault('flag_values', FLAGS) func(*args, **kw) @@ -133,16 +141,16 @@ def party_wrapper(func): return _wrapped -DEFINE_string = party_wrapper(gflags.DEFINE_string) -DEFINE_integer = party_wrapper(gflags.DEFINE_integer) -DEFINE_bool = party_wrapper(gflags.DEFINE_bool) -DEFINE_boolean = party_wrapper(gflags.DEFINE_boolean) -DEFINE_float = party_wrapper(gflags.DEFINE_float) -DEFINE_enum = party_wrapper(gflags.DEFINE_enum) -DEFINE_list = party_wrapper(gflags.DEFINE_list) -DEFINE_spaceseplist = party_wrapper(gflags.DEFINE_spaceseplist) -DEFINE_multistring = party_wrapper(gflags.DEFINE_multistring) -DEFINE_multi_int = party_wrapper(gflags.DEFINE_multi_int) +DEFINE_string = _wrapper(gflags.DEFINE_string) +DEFINE_integer = _wrapper(gflags.DEFINE_integer) +DEFINE_bool = _wrapper(gflags.DEFINE_bool) +DEFINE_boolean = _wrapper(gflags.DEFINE_boolean) +DEFINE_float = _wrapper(gflags.DEFINE_float) +DEFINE_enum = _wrapper(gflags.DEFINE_enum) +DEFINE_list = _wrapper(gflags.DEFINE_list) +DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist) +DEFINE_multistring = _wrapper(gflags.DEFINE_multistring) +DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int) def DECLARE(name, module_string, flag_values=FLAGS): From 8d70245cc78075356ec1ebabc4810df8b07428f6 Mon Sep 17 00:00:00 2001 From: "Joel Moore joelbm24@gmail.com" <> Date: Tue, 3 Aug 2010 11:02:58 -0700 Subject: [PATCH 012/101] Get IP doesn't fail of you not connected to the intetnet ------------- This line and the following will be ignored -------------- modified: nova/utils.py unknown: CA/cacert.pem CA/index.txt CA/openssl.cnf CA/serial CA/private/cakey.pem bin/nova@ --- nova/utils.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/nova/utils.py 
b/nova/utils.py index 0016b656e3..da6efd39a3 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -119,11 +119,15 @@ def get_my_ip(): ''' if getattr(FLAGS, 'fake_tests', None): return '127.0.0.1' - csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - csock.connect(('www.google.com', 80)) - (addr, port) = csock.getsockname() - csock.close() - return addr + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + csock.connect(('www.google.com', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.gaierror as ex: + logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex) + return "127.0.0.1" def isotime(at=None): if not at: From 6187529119ab51a6df7e30ef5190757ee0feca5e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 3 Aug 2010 15:04:38 -0700 Subject: [PATCH 013/101] vblade commands randomly toss stuff into stderr, ignore it --- nova/volume/service.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/nova/volume/service.py b/nova/volume/service.py index e12f675a77..9dd63e88f6 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -227,11 +227,7 @@ class Volume(datastore.BasicModel): @defer.inlineCallbacks def destroy(self): - try: - yield self._remove_export() - except Exception as ex: - logging.debug("Ingnoring failure to remove export %s" % ex) - pass + yield self._remove_export() yield self._delete_lv() super(Volume, self).destroy() @@ -250,7 +246,7 @@ class Volume(datastore.BasicModel): def _delete_lv(self): yield process.simple_execute( "sudo lvremove -f %s/%s" % (FLAGS.volume_group, - self['volume_id'])) + self['volume_id']), error_ok=1) @defer.inlineCallbacks def _setup_export(self): @@ -275,10 +271,10 @@ class Volume(datastore.BasicModel): def _remove_export(self): yield process.simple_execute( "sudo vblade-persist stop %s %s" % (self['shelf_id'], - self['blade_id'])) + self['blade_id']), error_ok=1) yield process.simple_execute( "sudo vblade-persist destroy 
%s %s" % (self['shelf_id'], - self['blade_id'])) + self['blade_id']), error_ok=1) class FakeVolume(Volume): From 24d5113636a92df386fa076cc89cea5b1c8b2580 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Wed, 4 Aug 2010 11:14:11 +0100 Subject: [PATCH 014/101] Added note to README. --- xenapi/README | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/xenapi/README b/xenapi/README index 1fc67aa7a9..fbd4710356 100644 --- a/xenapi/README +++ b/xenapi/README @@ -1,2 +1,6 @@ This directory contains files that are required for the XenAPI support. They should be installed in the XenServer / Xen Cloud Platform domain 0. + +Also, you need to + +chmod u+x /etc/xapi.d/plugins/objectstore From 4130a506900c833dba831cabbd0197b7d4b59dc0 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Wed, 4 Aug 2010 23:45:41 +0100 Subject: [PATCH 015/101] Move the xenapi top level directory under plugins, as suggested by Jay Pipes. --- {xenapi => plugins/xenapi}/README | 0 {xenapi => plugins/xenapi}/etc/xapi.d/plugins/objectstore | 0 {xenapi => plugins/xenapi}/etc/xapi.d/plugins/pluginlib_nova.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename {xenapi => plugins/xenapi}/README (100%) rename {xenapi => plugins/xenapi}/etc/xapi.d/plugins/objectstore (100%) rename {xenapi => plugins/xenapi}/etc/xapi.d/plugins/pluginlib_nova.py (100%) diff --git a/xenapi/README b/plugins/xenapi/README similarity index 100% rename from xenapi/README rename to plugins/xenapi/README diff --git a/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenapi/etc/xapi.d/plugins/objectstore similarity index 100% rename from xenapi/etc/xapi.d/plugins/objectstore rename to plugins/xenapi/etc/xapi.d/plugins/objectstore diff --git a/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py similarity index 100% rename from xenapi/etc/xapi.d/plugins/pluginlib_nova.py rename to plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py From d79fd0df0bf9c59483b30c0d8c3a811580a1ee39 Mon Sep 17 
00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 5 Aug 2010 04:31:21 -0700 Subject: [PATCH 016/101] Changed volumes to use a pool instead of globbing filesystem for concurrency reasons. Fixed broken tests. --- nova/tests/volume_unittest.py | 77 +++++++++++++++++++------ nova/volume/service.py | 102 ++++++++++++++++++---------------- 2 files changed, 114 insertions(+), 65 deletions(-) diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 0f4f0e34d2..2a07afe692 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -17,6 +17,10 @@ # under the License. import logging +import shutil +import tempfile + +from twisted.internet import defer from nova import compute from nova import exception @@ -34,10 +38,16 @@ class VolumeTestCase(test.TrialTestCase): super(VolumeTestCase, self).setUp() self.compute = compute.service.ComputeService() self.volume = None + self.tempdir = tempfile.mkdtemp() self.flags(connection_type='fake', - fake_storage=True) + fake_storage=True, + aoe_export_dir=self.tempdir) self.volume = volume_service.VolumeService() + def tearDown(self): + shutil.rmtree(self.tempdir) + + @defer.inlineCallbacks def test_run_create_volume(self): vol_size = '0' user_id = 'fake' @@ -48,34 +58,40 @@ class VolumeTestCase(test.TrialTestCase): volume_service.get_volume(volume_id)['volume_id']) rv = self.volume.delete_volume(volume_id) - self.assertFailure(volume_service.get_volume(volume_id), - exception.Error) + self.assertRaises(exception.Error, volume_service.get_volume, volume_id) + @defer.inlineCallbacks def test_too_big_volume(self): vol_size = '1001' user_id = 'fake' project_id = 'fake' - self.assertRaises(TypeError, - self.volume.create_volume, - vol_size, user_id, project_id) + try: + yield self.volume.create_volume(vol_size, user_id, project_id) + self.fail("Should have thrown TypeError") + except TypeError: + pass + @defer.inlineCallbacks def test_too_many_volumes(self): vol_size = '1' user_id = 'fake' 
project_id = 'fake' num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1 - total_slots = FLAGS.slots_per_shelf * num_shelves + total_slots = FLAGS.blades_per_shelf * num_shelves vols = [] + from nova import datastore + redis = datastore.Redis.instance() for i in xrange(total_slots): vid = yield self.volume.create_volume(vol_size, user_id, project_id) vols.append(vid) self.assertFailure(self.volume.create_volume(vol_size, user_id, project_id), - volume_service.NoMoreVolumes) + volume_service.NoMoreBlades) for id in vols: yield self.volume.delete_volume(id) + @defer.inlineCallbacks def test_run_attach_detach_volume(self): # Create one volume and one compute to test with instance_id = "storage-test" @@ -84,22 +100,26 @@ class VolumeTestCase(test.TrialTestCase): project_id = 'fake' mountpoint = "/dev/sdf" volume_id = yield self.volume.create_volume(vol_size, user_id, project_id) - volume_obj = volume_service.get_volume(volume_id) volume_obj.start_attach(instance_id, mountpoint) - rv = yield self.compute.attach_volume(volume_id, - instance_id, - mountpoint) + if FLAGS.fake_tests: + volume_obj.finish_attach() + else: + rv = yield self.compute.attach_volume(instance_id, + volume_id, + mountpoint) self.assertEqual(volume_obj['status'], "in-use") - self.assertEqual(volume_obj['attachStatus'], "attached") + self.assertEqual(volume_obj['attach_status'], "attached") self.assertEqual(volume_obj['instance_id'], instance_id) self.assertEqual(volume_obj['mountpoint'], mountpoint) - self.assertRaises(exception.Error, - self.volume.delete_volume, - volume_id) - - rv = yield self.volume.detach_volume(volume_id) + self.assertFailure(self.volume.delete_volume(volume_id), exception.Error) + volume_obj.start_detach() + if FLAGS.fake_tests: + volume_obj.finish_detach() + else: + rv = yield self.volume.detach_volume(instance_id, + volume_id) volume_obj = volume_service.get_volume(volume_id) self.assertEqual(volume_obj['status'], "available") @@ -108,6 +128,27 @@ class 
VolumeTestCase(test.TrialTestCase): volume_service.get_volume, volume_id) + @defer.inlineCallbacks + def test_multiple_volume_race_condition(self): + vol_size = "5" + user_id = "fake" + project_id = 'fake' + shelf_blades = [] + def _check(volume_id): + vol = volume_service.get_volume(volume_id) + shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id']) + self.assert_(shelf_blade not in shelf_blades) + shelf_blades.append(shelf_blade) + logging.debug("got %s" % shelf_blade) + vol.destroy() + deferreds = [] + for i in range(5): + d = self.volume.create_volume(vol_size, user_id, project_id) + d.addCallback(_check) + d.addErrback(self.fail) + deferreds.append(d) + yield defer.DeferredList(deferreds) + def test_multi_node(self): # TODO(termie): Figure out how to test with two nodes, # each of them having a different FLAG for storage_node diff --git a/nova/volume/service.py b/nova/volume/service.py index 9dd63e88f6..9c52ee469d 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -22,12 +22,8 @@ destroying persistent storage volumes, ala EBS. Currently uses Ata-over-Ethernet. 
""" -import glob import logging import os -import shutil -import socket -import tempfile from twisted.internet import defer @@ -47,9 +43,6 @@ flags.DEFINE_string('volume_group', 'nova-volumes', 'Name for the VG that will contain exported volumes') flags.DEFINE_string('aoe_eth_dev', 'eth0', 'Which device to export the volumes on') -flags.DEFINE_string('storage_name', - socket.gethostname(), - 'name of this service') flags.DEFINE_integer('first_shelf_id', utils.last_octet(utils.get_my_ip()) * 10, 'AoE starting shelf_id for this service') @@ -59,9 +52,9 @@ flags.DEFINE_integer('last_shelf_id', flags.DEFINE_string('aoe_export_dir', '/var/lib/vblade-persist/vblades', 'AoE directory where exports are created') -flags.DEFINE_integer('slots_per_shelf', +flags.DEFINE_integer('blades_per_shelf', 16, - 'Number of AoE slots per shelf') + 'Number of AoE blades per shelf') flags.DEFINE_string('storage_availability_zone', 'nova', 'availability zone of this service') @@ -69,7 +62,7 @@ flags.DEFINE_boolean('fake_storage', False, 'Should we make real storage volumes to attach?') -class NoMoreVolumes(exception.Error): +class NoMoreBlades(exception.Error): pass def get_volume(volume_id): @@ -77,8 +70,9 @@ def get_volume(volume_id): volume_class = Volume if FLAGS.fake_storage: volume_class = FakeVolume - if datastore.Redis.instance().sismember('volumes', volume_id): - return volume_class(volume_id=volume_id) + vol = volume_class.lookup(volume_id) + if vol: + return vol raise exception.Error("Volume does not exist") class VolumeService(service.Service): @@ -91,18 +85,9 @@ class VolumeService(service.Service): super(VolumeService, self).__init__() self.volume_class = Volume if FLAGS.fake_storage: - FLAGS.aoe_export_dir = tempfile.mkdtemp() self.volume_class = FakeVolume self._init_volume_group() - def __del__(self): - # TODO(josh): Get rid of this destructor, volumes destroy themselves - if FLAGS.fake_storage: - try: - shutil.rmtree(FLAGS.aoe_export_dir) - except Exception, err: - pass - 
@defer.inlineCallbacks @validate.rangetest(size=(0, 1000)) def create_volume(self, size, user_id, project_id): @@ -113,8 +98,6 @@ class VolumeService(service.Service): """ logging.debug("Creating volume of size: %s" % (size)) vol = yield self.volume_class.create(size, user_id, project_id) - datastore.Redis.instance().sadd('volumes', vol['volume_id']) - datastore.Redis.instance().sadd('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) logging.debug("restarting exports") yield self._restart_exports() defer.returnValue(vol['volume_id']) @@ -134,13 +117,11 @@ class VolumeService(service.Service): def delete_volume(self, volume_id): logging.debug("Deleting volume with id of: %s" % (volume_id)) vol = get_volume(volume_id) - if vol['status'] == "attached": + if vol['attach_status'] == "attached": raise exception.Error("Volume is still attached") - if vol['node_name'] != FLAGS.storage_name: + if vol['node_name'] != FLAGS.node_name: raise exception.Error("Volume is not local to this node") yield vol.destroy() - datastore.Redis.instance().srem('volumes', vol['volume_id']) - datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id']) defer.returnValue(True) @defer.inlineCallbacks @@ -172,14 +153,15 @@ class Volume(datastore.BasicModel): return self.volume_id def default_state(self): - return {"volume_id": self.volume_id} + return {"volume_id": self.volume_id, + "node_name": "unassigned"} @classmethod @defer.inlineCallbacks def create(cls, size, user_id, project_id): volume_id = utils.generate_uid('vol') vol = cls(volume_id) - vol['node_name'] = FLAGS.storage_name + vol['node_name'] = FLAGS.node_name vol['size'] = size vol['user_id'] = user_id vol['project_id'] = project_id @@ -225,10 +207,31 @@ class Volume(datastore.BasicModel): self['attach_status'] = "detached" self.save() + def save(self): + is_new = self.is_new_record() + super(Volume, self).save() + if is_new: + redis = datastore.Redis.instance() + key = self.__devices_key + # TODO(vish): 
these should be added by admin commands + more = redis.scard(self._redis_association_name("node", + self['node_name'])) + if (not redis.exists(key) and not more): + for shelf_id in range(FLAGS.first_shelf_id, + FLAGS.last_shelf_id + 1): + for blade_id in range(FLAGS.blades_per_shelf): + redis.sadd(key, "%s.%s" % (shelf_id, blade_id)) + self.associate_with("node", self['node_name']) + @defer.inlineCallbacks def destroy(self): yield self._remove_export() yield self._delete_lv() + self.unassociate_with("node", self['node_name']) + if self.get('shelf_id', None) and self.get('blade_id', None): + redis = datastore.Redis.instance() + key = self.__devices_key + redis.sadd(key, "%s.%s" % (self['shelf_id'], self['blade_id'])) super(Volume, self).destroy() @defer.inlineCallbacks @@ -248,17 +251,26 @@ class Volume(datastore.BasicModel): "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']), error_ok=1) + @property + def __devices_key(self): + return 'volume_devices:%s' % FLAGS.node_name + @defer.inlineCallbacks def _setup_export(self): - (shelf_id, blade_id) = get_next_aoe_numbers() + redis = datastore.Redis.instance() + key = self.__devices_key + device = redis.spop(key) + if not device: + raise NoMoreBlades() + (shelf_id, blade_id) = device.split('.') self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) self['shelf_id'] = shelf_id self['blade_id'] = blade_id self.save() - yield self._exec_export() + yield self._exec_setup_export() @defer.inlineCallbacks - def _exec_export(self): + def _exec_setup_export(self): yield process.simple_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % (self['shelf_id'], @@ -269,6 +281,13 @@ class Volume(datastore.BasicModel): @defer.inlineCallbacks def _remove_export(self): + if not self.get('shelf_id', None) or not self.get('blade_id', None): + defer.returnValue(False) + yield self._exec_remove_export() + defer.returnValue(True) + + @defer.inlineCallbacks + def _exec_remove_export(self): yield process.simple_execute( "sudo 
vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']), error_ok=1) @@ -277,29 +296,18 @@ class Volume(datastore.BasicModel): self['blade_id']), error_ok=1) + class FakeVolume(Volume): def _create_lv(self): pass - def _exec_export(self): + def _exec_setup_export(self): fname = os.path.join(FLAGS.aoe_export_dir, self['aoe_device']) f = file(fname, "w") f.close() - def _remove_export(self): - pass + def _exec_remove_export(self): + os.unlink(os.path.join(FLAGS.aoe_export_dir, self['aoe_device'])) def _delete_lv(self): pass - -def get_next_aoe_numbers(): - for shelf_id in xrange(FLAGS.first_shelf_id, FLAGS.last_shelf_id + 1): - aoes = glob.glob("%s/e%s.*" % (FLAGS.aoe_export_dir, shelf_id)) - if not aoes: - blade_id = 0 - else: - blade_id = int(max([int(a.rpartition('.')[2]) for a in aoes])) + 1 - if blade_id < FLAGS.slots_per_shelf: - logging.debug("Next shelf.blade is %s.%s", shelf_id, blade_id) - return (shelf_id, blade_id) - raise NoMoreVolumes() From 64e34d8004662879708f69e476881adc9d6ba45b Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 5 Aug 2010 22:54:08 -0400 Subject: [PATCH 017/101] Use webob to simplify wsgi middleware --- nova/endpoint/new_wsgi.py | 122 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 nova/endpoint/new_wsgi.py diff --git a/nova/endpoint/new_wsgi.py b/nova/endpoint/new_wsgi.py new file mode 100644 index 0000000000..c7eee20fc4 --- /dev/null +++ b/nova/endpoint/new_wsgi.py @@ -0,0 +1,122 @@ +import eventlet +import eventlet.wsgi +eventlet.patcher.monkey_patch(all=False, socket=True) +import carrot.connection +import carrot.messaging +import itertools +import routes + +# See http://pythonpaste.org/webob/ for usage +from webob.dec import wsgify +from webob import exc, Request, Response + +class WSGILayer(object): + def __init__(self, application=None): + self.application = application + + def __call__(self, environ, start_response): + # Subclasses will probably want to implement 
__call__ like this: + # + # @wsgify + # def __call__(self, req): + # # Any of the following objects work as responses: + # + # # Option 1: simple string + # resp = 'message\n' + # + # # Option 2: a nicely formatted HTTP exception page + # resp = exc.HTTPForbidden(detail='Nice try') + # + # # Option 3: a webob Response object (in case you need to play with + # # headers, or you want to be treated like an iterable, or or or) + # resp = Response(); resp.app_iter = open('somefile') + # + # # Option 4: any wsgi app to be run next + # resp = self.application + # + # # Option 5: you can get a Response object for a wsgi app, too, to + # # play with headers etc + # resp = req.get_response(self.application) + # + # + # # You can then just return your response... + # return resp # option 1 + # # ... or set req.response and return None. + # req.response = resp # option 2 + # + # See the end of http://pythonpaste.org/webob/modules/dec.html + # for more info. + raise NotImplementedError("You must implement __call__") + + +class Debug(WSGILayer): + @wsgify + def __call__(self, req): + for k, v in req.environ.items(): + print k, "=", v + return self.application + +class Auth(WSGILayer): + @wsgify + def __call__(self, req): + if not 'openstack.auth.token' in req.environ: + # Check auth params here + if True: + req.environ['openstack.auth.token'] = '12345' + else: + return exc.HTTPForbidden(detail="Go away") + + response = req.get_response(self.application) + response.headers['X-Openstack-Auth'] = 'Success' + return response + +class Router(WSGILayer): + def __init__(self, application=None): + super(Router, self).__init__(application) + self.map = routes.Mapper() + self._connect() + + @wsgify + def __call__(self, req): + match = self.map.match(req.path_info) + if match is None: + return self.application + req.environ['openstack.match'] = match + return match['controller'] + + def _connect(self): + raise NotImplementedError("You must implement _connect") + +class FileRouter(Router): 
+ def _connect(self): + self.map.connect(None, '/files/{file}', controller=File()) + self.map.connect(None, '/rfiles/{file}', controller=Reverse(File())) + +class Message(WSGILayer): + @wsgify + def __call__(self, req): + return 'message\n' + +class Reverse(WSGILayer): + @wsgify + def __call__(self, req): + inner_resp = req.get_response(self.application) + print "+" * 80 + Debug()(req) + print "*" * 80 + resp = Response() + resp.app_iter = itertools.imap(lambda x: x[::-1], inner_resp.app_iter) + return resp + +class File(WSGILayer): + @wsgify + def __call__(self, req): + try: + myfile = open(req.environ['openstack.match']['file']) + except IOError, e: + raise exc.HTTPNotFound() + req.response = Response() + req.response.app_iter = myfile + +sock = eventlet.listen(('localhost', 12345)) +eventlet.wsgi.server(sock, Debug(Auth(FileRouter(Message())))) From f27d775bee0089e0c86f9a0421a57ab41d0a3a57 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 5 Aug 2010 23:35:16 -0400 Subject: [PATCH 018/101] WsgiStack class, eventletserver.serve. Trying to work toward a simple API that anyone can use to start an eventlet-based server composed of several WSGI apps. 
--- nova/endpoint/eventletserver.py | 7 +++++++ nova/endpoint/new_wsgi.py | 30 ++++++++++++++++++++++-------- nova/endpoint/rackspace.py | 2 -- 3 files changed, 29 insertions(+), 10 deletions(-) create mode 100644 nova/endpoint/eventletserver.py diff --git a/nova/endpoint/eventletserver.py b/nova/endpoint/eventletserver.py new file mode 100644 index 0000000000..b8c15ff5dd --- /dev/null +++ b/nova/endpoint/eventletserver.py @@ -0,0 +1,7 @@ +import eventlet +import eventlet.wsgi +eventlet.patcher.monkey_patch(all=False, socket=True) + +def serve(app, port): + sock = eventlet.listen(('0.0.0.0', port)) + eventlet.wsgi.server(sock, app) diff --git a/nova/endpoint/new_wsgi.py b/nova/endpoint/new_wsgi.py index c7eee20fc4..0f096ddb71 100644 --- a/nova/endpoint/new_wsgi.py +++ b/nova/endpoint/new_wsgi.py @@ -1,11 +1,10 @@ -import eventlet -import eventlet.wsgi -eventlet.patcher.monkey_patch(all=False, socket=True) +import eventletserver import carrot.connection import carrot.messaging import itertools import routes + # See http://pythonpaste.org/webob/ for usage from webob.dec import wsgify from webob import exc, Request, Response @@ -49,6 +48,19 @@ class WSGILayer(object): raise NotImplementedError("You must implement __call__") +class WsgiStack(WSGILayer): + def __init__(self, wsgi_layers): + bottom_up = list(reversed(wsgi_layers)) + app, remaining = bottom_up[0], bottom_up[1:] + for layer in remaining: + layer.application = app + app = layer + super(WsgiStack, self).__init__(app) + + @wsgify + def __call__(self, req): + return self.application + class Debug(WSGILayer): @wsgify def __call__(self, req): @@ -101,9 +113,6 @@ class Reverse(WSGILayer): @wsgify def __call__(self, req): inner_resp = req.get_response(self.application) - print "+" * 80 - Debug()(req) - print "*" * 80 resp = Response() resp.app_iter = itertools.imap(lambda x: x[::-1], inner_resp.app_iter) return resp @@ -118,5 +127,10 @@ class File(WSGILayer): req.response = Response() req.response.app_iter = 
myfile -sock = eventlet.listen(('localhost', 12345)) -eventlet.wsgi.server(sock, Debug(Auth(FileRouter(Message())))) +wsgi_layers = [ + Auth(), + Debug(), + FileRouter(), + Message(), + ] +eventletserver.serve(app=WsgiStack(wsgi_layers), port=12345) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index 870aa0629b..323032eb19 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -26,8 +26,6 @@ import logging import multiprocessing import os import time -import tornado.web -from twisted.internet import defer from nova import datastore from nova import exception From a33dce2da8dc8e25d0943732adfa6b14b1e48c7b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 6 Aug 2010 15:48:46 -0700 Subject: [PATCH 019/101] a few more commands were putting output on stderr. In general, exceptions on stderr output seems like a bad idea --- nova/volume/service.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/volume/service.py b/nova/volume/service.py index 9c52ee469d..66163a8121 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -128,8 +128,8 @@ class VolumeService(service.Service): def _restart_exports(self): if FLAGS.fake_storage: return - yield process.simple_execute("sudo vblade-persist auto all") - # NOTE(vish): this command sometimes sends output to stderr for warnings + # NOTE(vish): these commands sometimes sends output to stderr for warnings + yield process.simple_execute("sudo vblade-persist auto all", error_ok=1) yield process.simple_execute("sudo vblade-persist start all", error_ok=1) @defer.inlineCallbacks @@ -243,7 +243,8 @@ class Volume(datastore.BasicModel): yield process.simple_execute( "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], - FLAGS.volume_group)) + FLAGS.volume_group), + error_ok=1) @defer.inlineCallbacks def _delete_lv(self): @@ -277,7 +278,7 @@ class Volume(datastore.BasicModel): self['blade_id'], FLAGS.aoe_eth_dev, FLAGS.volume_group, - 
self['volume_id'])) + self['volume_id']), error_ok=1) @defer.inlineCallbacks def _remove_export(self): From 91e085b2c272ebd30955a83d3871c402f6749316 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Fri, 6 Aug 2010 18:06:57 -0700 Subject: [PATCH 020/101] Changed the network imports to use new network layout. --- nova/endpoint/rackspace.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index 323032eb19..7a3fbe141f 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -34,7 +34,7 @@ from nova import rpc from nova import utils from nova.auth import manager from nova.compute import model -from nova.compute import network +from nova.network import model as network from nova.endpoint import images from nova.endpoint import wsgi From fd625a55c3725b5cff4449a687b0d54d0d49bd2e Mon Sep 17 00:00:00 2001 From: Eric Day Date: Sat, 7 Aug 2010 12:12:10 -0700 Subject: [PATCH 021/101] Reworked WSGI helper module and converted rackspace API endpoint to use it. --- bin/nova-rsapi | 21 +-- nova/endpoint/eventletserver.py | 7 - nova/endpoint/new_wsgi.py | 136 --------------- nova/endpoint/rackspace.py | 286 ++++++++++---------------------- nova/endpoint/wsgi.py | 40 ----- nova/wsgi.py | 173 +++++++++++++++++++ 6 files changed, 263 insertions(+), 400 deletions(-) delete mode 100644 nova/endpoint/eventletserver.py delete mode 100644 nova/endpoint/new_wsgi.py delete mode 100644 nova/endpoint/wsgi.py create mode 100644 nova/wsgi.py diff --git a/bin/nova-rsapi b/bin/nova-rsapi index a17efccc08..026880d5af 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -1,4 +1,5 @@ #!/usr/bin/env python +# pylint: disable-msg=C0103 # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the @@ -17,31 +18,17 @@ # See the License for the specific language governing permissions and # limitations under the License. """ - WSGI daemon for the main API endpoint. 
+ Daemon for the Rackspace API endpoint. """ -import logging -from tornado import ioloop -from wsgiref import simple_server - from nova import flags -from nova import rpc -from nova import server from nova import utils -from nova.auth import manager +from nova import wsgi from nova.endpoint import rackspace FLAGS = flags.FLAGS flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') -def main(_argv): - api_instance = rackspace.Api() - http_server = simple_server.WSGIServer(('0.0.0.0', FLAGS.cc_port), simple_server.WSGIRequestHandler) - http_server.set_app(api_instance.handler) - logging.debug('Started HTTP server on port %i' % FLAGS.cc_port) - while True: - http_server.handle_request() - if __name__ == '__main__': utils.default_flagfile() - server.serve('nova-rsapi', main) + wsgi.run_server(rackspace.API(), FLAGS.cc_port) diff --git a/nova/endpoint/eventletserver.py b/nova/endpoint/eventletserver.py deleted file mode 100644 index b8c15ff5dd..0000000000 --- a/nova/endpoint/eventletserver.py +++ /dev/null @@ -1,7 +0,0 @@ -import eventlet -import eventlet.wsgi -eventlet.patcher.monkey_patch(all=False, socket=True) - -def serve(app, port): - sock = eventlet.listen(('0.0.0.0', port)) - eventlet.wsgi.server(sock, app) diff --git a/nova/endpoint/new_wsgi.py b/nova/endpoint/new_wsgi.py deleted file mode 100644 index 0f096ddb71..0000000000 --- a/nova/endpoint/new_wsgi.py +++ /dev/null @@ -1,136 +0,0 @@ -import eventletserver -import carrot.connection -import carrot.messaging -import itertools -import routes - - -# See http://pythonpaste.org/webob/ for usage -from webob.dec import wsgify -from webob import exc, Request, Response - -class WSGILayer(object): - def __init__(self, application=None): - self.application = application - - def __call__(self, environ, start_response): - # Subclasses will probably want to implement __call__ like this: - # - # @wsgify - # def __call__(self, req): - # # Any of the following objects work as responses: - # - # # Option 1: simple 
string - # resp = 'message\n' - # - # # Option 2: a nicely formatted HTTP exception page - # resp = exc.HTTPForbidden(detail='Nice try') - # - # # Option 3: a webob Response object (in case you need to play with - # # headers, or you want to be treated like an iterable, or or or) - # resp = Response(); resp.app_iter = open('somefile') - # - # # Option 4: any wsgi app to be run next - # resp = self.application - # - # # Option 5: you can get a Response object for a wsgi app, too, to - # # play with headers etc - # resp = req.get_response(self.application) - # - # - # # You can then just return your response... - # return resp # option 1 - # # ... or set req.response and return None. - # req.response = resp # option 2 - # - # See the end of http://pythonpaste.org/webob/modules/dec.html - # for more info. - raise NotImplementedError("You must implement __call__") - - -class WsgiStack(WSGILayer): - def __init__(self, wsgi_layers): - bottom_up = list(reversed(wsgi_layers)) - app, remaining = bottom_up[0], bottom_up[1:] - for layer in remaining: - layer.application = app - app = layer - super(WsgiStack, self).__init__(app) - - @wsgify - def __call__(self, req): - return self.application - -class Debug(WSGILayer): - @wsgify - def __call__(self, req): - for k, v in req.environ.items(): - print k, "=", v - return self.application - -class Auth(WSGILayer): - @wsgify - def __call__(self, req): - if not 'openstack.auth.token' in req.environ: - # Check auth params here - if True: - req.environ['openstack.auth.token'] = '12345' - else: - return exc.HTTPForbidden(detail="Go away") - - response = req.get_response(self.application) - response.headers['X-Openstack-Auth'] = 'Success' - return response - -class Router(WSGILayer): - def __init__(self, application=None): - super(Router, self).__init__(application) - self.map = routes.Mapper() - self._connect() - - @wsgify - def __call__(self, req): - match = self.map.match(req.path_info) - if match is None: - return self.application - 
req.environ['openstack.match'] = match - return match['controller'] - - def _connect(self): - raise NotImplementedError("You must implement _connect") - -class FileRouter(Router): - def _connect(self): - self.map.connect(None, '/files/{file}', controller=File()) - self.map.connect(None, '/rfiles/{file}', controller=Reverse(File())) - -class Message(WSGILayer): - @wsgify - def __call__(self, req): - return 'message\n' - -class Reverse(WSGILayer): - @wsgify - def __call__(self, req): - inner_resp = req.get_response(self.application) - resp = Response() - resp.app_iter = itertools.imap(lambda x: x[::-1], inner_resp.app_iter) - return resp - -class File(WSGILayer): - @wsgify - def __call__(self, req): - try: - myfile = open(req.environ['openstack.match']['file']) - except IOError, e: - raise exc.HTTPNotFound() - req.response = Response() - req.response.app_iter = myfile - -wsgi_layers = [ - Auth(), - Debug(), - FileRouter(), - Message(), - ] -eventletserver.serve(app=WsgiStack(wsgi_layers), port=12345) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index 7a3fbe141f..f6735a2605 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -17,206 +17,95 @@ # under the License. 
""" -Rackspace API +Rackspace API Endpoint """ -import base64 import json -import logging -import multiprocessing -import os import time -from nova import datastore -from nova import exception +import webob.dec +import webob.exc + from nova import flags from nova import rpc from nova import utils +from nova import wsgi from nova.auth import manager -from nova.compute import model +from nova.compute import model as compute from nova.network import model as network -from nova.endpoint import images -from nova.endpoint import wsgi FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') -class Unauthorized(Exception): - pass - -class NotFound(Exception): - pass - - -class Api(object): +class API(wsgi.Middleware): + """Entry point for all requests.""" def __init__(self): - """build endpoints here""" - self.controllers = { - "v1.0": RackspaceAuthenticationApi(), - "servers": RackspaceCloudServerApi() - } + super(API, self).__init__(Router(webob.exc.HTTPNotFound())) - def handler(self, environ, responder): - """ - This is the entrypoint from wsgi. Read PEP 333 and wsgi.org for - more intormation. The key points are responder is a callback that - needs to run before you return, and takes two arguments, response - code string ("200 OK") and headers (["X-How-Cool-Am-I: Ultra-Suede"]) - and the return value is the body of the response. 
- """ - environ['nova.context'] = self.build_context(environ) - controller, path = wsgi.Util.route( - environ['PATH_INFO'], - self.controllers - ) - logging.debug("Route %s to %s", str(path), str(controller)) - if not controller: - responder("404 Not Found", []) - return "" - try: - rv = controller.process(path, environ) - if type(rv) is tuple: - responder(rv[0], rv[1]) - rv = rv[2] - else: - responder("200 OK", []) - return rv - except Unauthorized: - responder("401 Unauthorized", []) - return "" - except NotFound: - responder("404 Not Found", []) - return "" + def __call__(self, environ, start_response): + context = {} + if "HTTP_X_AUTH_TOKEN" in environ: + context['user'] = manager.AuthManager().get_user_from_access_key( + environ['HTTP_X_AUTH_TOKEN']) + if context['user']: + context['project'] = manager.AuthManager().get_project( + context['user'].name) + if "user" not in context: + return webob.exc.HTTPForbidden()(environ, start_response) + environ['nova.context'] = context + return self.application(environ, start_response) - def build_context(self, env): - rv = {} - if env.has_key("HTTP_X_AUTH_TOKEN"): - rv['user'] = manager.AuthManager().get_user_from_access_key( - env['HTTP_X_AUTH_TOKEN'] - ) - if rv['user']: - rv['project'] = manager.AuthManager().get_project( - rv['user'].name - ) - return rv +class Router(wsgi.Router): + """Route requests to the next WSGI application.""" + + def _build_map(self): + """Build routing map for authentication and cloud.""" + self._connect("/v1.0", controller=AuthenticationAPI()) + cloud = CloudServerAPI() + self._connect("/servers", controller=cloud.launch_server, + conditions={"method": ["POST"]}) + self._connect("/servers/{server_id}", controller=cloud.delete_server, + conditions={'method': ["DELETE"]}) + self._connect("/servers", controller=cloud) -class RackspaceApiEndpoint(object): - def process(self, path, env): - """ - Main entrypoint for all controllers (what gets run by the wsgi handler). 
- Check authentication based on key, raise Unauthorized if invalid. +class AuthenticationAPI(wsgi.Application): + """Handle all authorization requests through WSGI applications.""" - Select the most appropriate action based on request type GET, POST, etc, - then pass it through to the implementing controller. Defalut to GET if - the implementing child doesn't respond to a particular type. - """ - if not self.check_authentication(env): - raise Unauthorized("Unable to authenticate") - - method = env['REQUEST_METHOD'].lower() - callback = getattr(self, method, None) - if not callback: - callback = getattr(self, "get") - logging.debug("%s processing %s with %s", self, method, callback) - return callback(path, env) - - def get(self, path, env): - """ - The default GET will look at the path and call an appropriate - action within this controller based on the the structure of the path. - - Given the following path lengths (with the first part stripped of by - router, as it is the controller name): - = 0 -> index - = 1 -> first component (/servers/details -> details) - >= 2 -> second path component (/servers/ID/ips/* -> ips) - - This should return - A String if 200 OK and no additional headers - (CODE, HEADERS, BODY) for custom response code and headers - """ - if len(path) == 0 and hasattr(self, "index"): - logging.debug("%s running index", self) - return self.index(env) - if len(path) >= 2: - action = path[1] - else: - action = path.pop(0) - - logging.debug("%s running action %s", self, action) - if hasattr(self, action): - method = getattr(self, action) - return method(path, env) - else: - raise NotFound("Missing method %s" % path[0]) - - def check_authentication(self, env): - if not env['nova.context']['user']: - return False - return True + @webob.dec.wsgify + def __call__(self, req): # pylint: disable-msg=W0221 + # TODO(todd): make a actual session with a unique token + # just pass the auth key back through for now + res = webob.Response() + res.status = '204 No 
Content' + res.headers.add('X-Server-Management-Url', req.host_url) + res.headers.add('X-Storage-Url', req.host_url) + res.headers.add('X-CDN-Managment-Url', req.host_url) + res.headers.add('X-Auth-Token', req.headers['X-Auth-Key']) + return res -class RackspaceAuthenticationApi(object): - - def process(self, path, env): - return self.index(path, env) - - # TODO(todd): make a actual session with a unique token - # just pass the auth key back through for now - def index(self, _path, env): - response = '204 No Content' - headers = [ - ('X-Server-Management-Url', 'http://%s' % env['HTTP_HOST']), - ('X-Storage-Url', 'http://%s' % env['HTTP_HOST']), - ('X-CDN-Managment-Url', 'http://%s' % env['HTTP_HOST']), - ('X-Auth-Token', env['HTTP_X_AUTH_KEY']) - ] - body = "" - return (response, headers, body) - - -class RackspaceCloudServerApi(RackspaceApiEndpoint): +class CloudServerAPI(wsgi.Application): + """Handle all server requests through WSGI applications.""" def __init__(self): - self.instdir = model.InstanceDirectory() + super(CloudServerAPI, self).__init__() + self.instdir = compute.InstanceDirectory() self.network = network.PublicNetworkController() - def post(self, path, env): - if len(path) == 0: - return self.launch_server(env) - - def delete(self, path_parts, env): - if self.delete_server(path_parts[0]): - return ("202 Accepted", [], "") - else: - return ("404 Not Found", [], - "Did not find image, or it was not in a running state") - - - def index(self, env): - return self.detail(env) - - def detail(self, args, env): + @webob.dec.wsgify + def __call__(self, req): # pylint: disable-msg=W0221 value = {"servers": []} for inst in self.instdir.all: value["servers"].append(self.instance_details(inst)) return json.dumps(value) - ## - ## - - def launch_server(self, env): - data = json.loads(env['wsgi.input'].read(int(env['CONTENT_LENGTH']))) - inst = self.build_server_instance(data, env['nova.context']) - self.schedule_launch_of_instance(inst) - return 
json.dumps({"server": self.instance_details(inst)}) - - def instance_details(self, inst): + def instance_details(self, inst): # pylint: disable-msg=R0201 + "Build the data structure to represent details for an instance." return { "id": inst.get("instance_id", None), "imageId": inst.get("image_id", None), @@ -224,11 +113,9 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint): "hostId": inst.get("node_name", None), "status": inst.get("state", "pending"), "addresses": { - "public": [self.network.get_public_ip_for_instance( - inst.get("instance_id", None) - )], - "private": [inst.get("private_dns_name", None)] - }, + "public": [network.get_public_ip_for_instance( + inst.get("instance_id", None))], + "private": [inst.get("private_dns_name", None)]}, # implemented only by Rackspace, not AWS "name": inst.get("name", "Not-Specified"), @@ -237,11 +124,22 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint): "progress": "Not-Supported", "metadata": { "Server Label": "Not-Supported", - "Image Version": "Not-Supported" - } - } + "Image Version": "Not-Supported"}} + + @webob.dec.wsgify + def launch_server(self, req): + """Launch a new instance.""" + data = json.loads(req.body) + inst = self.build_server_instance(data, req.environ['nova.context']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + return json.dumps({"server": self.instance_details(inst)}) def build_server_instance(self, env, context): + """Build instance data structure and save it to the data store.""" reservation = utils.generate_uid('r') ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) inst = self.instdir.new() @@ -253,45 +151,33 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint): inst['reservation_id'] = reservation inst['launch_time'] = ltime inst['mac_address'] = utils.generate_mac() - address = network.allocate_ip( + address = self.network.allocate_ip( inst['user_id'], inst['project_id'], - mac=inst['mac_address'] - ) + 
mac=inst['mac_address']) inst['private_dns_name'] = str(address) inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( inst['user_id'], inst['project_id'], - 'default' # security group - )['bridge_name'] + 'default')['bridge_name'] # key_data, key_name, ami_launch_index # TODO(todd): key data or root password inst.save() return inst - def schedule_launch_of_instance(self, inst): - rpc.cast( - FLAGS.compute_topic, - { - "method": "run_instance", - "args": {"instance_id": inst.instance_id} - } - ) - - def delete_server(self, instance_id): - owner_hostname = self.host_for_instance(instance_id) - # it isn't launched? + @webob.dec.wsgify + @wsgi.route_args + def delete_server(self, req, route_args): # pylint: disable-msg=R0201 + """Delete an instance.""" + owner_hostname = None + instance = compute.Instance.lookup(route_args['server_id']) + if instance: + owner_hostname = instance["node_name"] if not owner_hostname: - return None + return webob.exc.HTTPNotFound("Did not find image, or it was " + "not in a running state.") rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname) rpc.cast(rpc_transport, {"method": "reboot_instance", - "args": {"instance_id": instance_id}}) - return True - - def host_for_instance(self, instance_id): - instance = model.Instance.lookup(instance_id) - if not instance: - return None - return instance["node_name"] - + "args": {"instance_id": route_args['server_id']}}) + req.status = "202 Accepted" diff --git a/nova/endpoint/wsgi.py b/nova/endpoint/wsgi.py deleted file mode 100644 index b7bb588c39..0000000000 --- a/nova/endpoint/wsgi.py +++ /dev/null @@ -1,40 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -''' -Utility methods for working with WSGI servers -''' - -class Util(object): - - @staticmethod - def route(reqstr, controllers): - if len(reqstr) == 0: - return Util.select_root_controller(controllers), [] - parts = [x for x in reqstr.split("/") if len(x) > 0] - if len(parts) == 0: - return Util.select_root_controller(controllers), [] - return controllers[parts[0]], parts[1:] - - @staticmethod - def select_root_controller(controllers): - if '' in controllers: - return controllers[''] - else: - return None - diff --git a/nova/wsgi.py b/nova/wsgi.py new file mode 100644 index 0000000000..4fd6e59e36 --- /dev/null +++ b/nova/wsgi.py @@ -0,0 +1,173 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Utility methods for working with WSGI servers +""" + +import logging +import sys + +import eventlet +import eventlet.wsgi +eventlet.patcher.monkey_patch(all=False, socket=True) +import routes +import routes.middleware + + +logging.getLogger("routes.middleware").addHandler(logging.StreamHandler()) + + +def run_server(application, port): + """Run a WSGI server with the given application.""" + sock = eventlet.listen(('0.0.0.0', port)) + eventlet.wsgi.server(sock, application) + + +class Application(object): + """Base WSGI application wrapper. Subclasses need to implement __call__.""" + + def __call__(self, environ, start_response): + r"""Subclasses will probably want to implement __call__ like this: + + @webob.dec.wsgify + def __call__(self, req): + # Any of the following objects work as responses: + + # Option 1: simple string + res = 'message\n' + + # Option 2: a nicely formatted HTTP exception page + res = exc.HTTPForbidden(detail='Nice try') + + # Option 3: a webob Response object (in case you need to play with + # headers, or you want to be treated like an iterable, or or or) + res = Response(); + res.app_iter = open('somefile') + + # Option 4: any wsgi app to be run next + res = self.application + + # Option 5: you can get a Response object for a wsgi app, too, to + # play with headers etc + res = req.get_response(self.application) + + # You can then just return your response... + return res + # ... or set req.response and return None. + req.response = res + + See the end of http://pythonpaste.org/webob/modules/dec.html + for more info. + """ + raise NotImplementedError("You must implement __call__") + + +class Middleware(Application): # pylint: disable-msg=W0223 + """Base WSGI middleware wrapper. 
These classes require an + application to be initialized that will be called next.""" + + def __init__(self, application): # pylint: disable-msg=W0231 + self.application = application + + +class Debug(Middleware): + """Helper class that can be insertd into any WSGI application chain + to get information about the request and response.""" + + def __call__(self, environ, start_response): + for key, value in environ.items(): + print key, "=", value + print + wrapper = debug_start_response(start_response) + return debug_print_body(self.application(environ, wrapper)) + + +def debug_start_response(start_response): + """Wrap the start_response to capture when called.""" + + def wrapper(status, headers, exc_info=None): + """Print out all headers when start_response is called.""" + print status + for (key, value) in headers: + print key, "=", value + print + start_response(status, headers, exc_info) + + return wrapper + + +def debug_print_body(body): + """Print the body of the response as it is sent back.""" + + class Wrapper(object): + """Iterate through all the body parts and print before returning.""" + + def __iter__(self): + for part in body: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print + + return Wrapper() + + +class ParsedRoutes(Middleware): + """Processed parsed routes from routes.middleware.RoutesMiddleware + and call either the controller if found or the default application + otherwise.""" + + def __call__(self, environ, start_response): + if environ['routes.route'] is None: + return self.application(environ, start_response) + app = environ['wsgiorg.routing_args'][1]['controller'] + return app(environ, start_response) + + +class Router(Middleware): # pylint: disable-msg=R0921 + """Wrapper to help setup routes.middleware.RoutesMiddleware.""" + + def __init__(self, application): + self.map = routes.Mapper() + self._build_map() + application = ParsedRoutes(application) + application = routes.middleware.RoutesMiddleware(application, self.map) + 
super(Router, self).__init__(application) + + def __call__(self, environ, start_response): + return self.application(environ, start_response) + + def _build_map(self): + """Method to create new connections for the routing map.""" + raise NotImplementedError("You must implement _build_map") + + def _connect(self, *args, **kwargs): + """Wrapper for the map.connect method.""" + self.map.connect(*args, **kwargs) + + +def route_args(application): + """Decorator to make grabbing routing args more convenient.""" + + def wrapper(self, req): + """Call application with req and parsed routing args from.""" + return application(self, req, req.environ['wsgiorg.routing_args'][1]) + + return wrapper From 46c0f66d123e2b4af101bb12408ad6de5eb6855b Mon Sep 17 00:00:00 2001 From: Eric Day Date: Sat, 7 Aug 2010 19:51:17 -0700 Subject: [PATCH 022/101] Cleaned up pep8/pylint for bin/* files. I did not fix rsapi since this is already cleaned up in another branch. --- bin/nova-api | 6 +- bin/nova-dhcpbridge | 42 ++++---- bin/nova-import-canonical-imagestore | 31 +++--- bin/nova-instancemonitor | 18 +--- bin/nova-manage | 138 +++++++++++++++------------ bin/nova-objectstore | 8 +- pylintrc | 3 + run_tests.py | 3 +- 8 files changed, 130 insertions(+), 119 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 1f2009c307..13baf22a73 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -29,8 +29,6 @@ from nova import flags from nova import rpc from nova import server from nova import utils -from nova.auth import manager -from nova.compute import model from nova.endpoint import admin from nova.endpoint import api from nova.endpoint import cloud @@ -39,10 +37,10 @@ FLAGS = flags.FLAGS def main(_argv): + """Load the controllers and start the tornado I/O loop.""" controllers = { 'Cloud': cloud.CloudController(), - 'Admin': admin.AdminController() - } + 'Admin': admin.AdminController()} _app = api.APIServerApplication(controllers) conn = rpc.Connection.instance() diff --git a/bin/nova-dhcpbridge 
b/bin/nova-dhcpbridge index b3e7d456ae..ed1af206aa 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -18,8 +18,6 @@ # under the License. """ -nova-dhcpbridge - Handle lease database updates from DHCP servers. """ @@ -42,34 +40,43 @@ from nova.network import service FLAGS = flags.FLAGS -def add_lease(mac, ip, hostname, interface): +def add_lease(_mac, ip, _hostname, _interface): + """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: service.VlanNetworkService().lease_ip(ip) else: - rpc.cast("%s.%s" (FLAGS.network_topic, FLAGS.node_name), + rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), {"method": "lease_ip", - "args" : {"fixed_ip": ip}}) + "args": {"fixed_ip": ip}}) -def old_lease(mac, ip, hostname, interface): + +def old_lease(_mac, _ip, _hostname, _interface): + """Do nothing, just an old lease update.""" logging.debug("Adopted old lease or got a change of mac/hostname") -def del_lease(mac, ip, hostname, interface): + +def del_lease(_mac, ip, _hostname, _interface): + """Remove the leased IP from the databases.""" if FLAGS.fake_rabbit: service.VlanNetworkService().release_ip(ip) else: - rpc.cast("%s.%s" (FLAGS.network_topic, FLAGS.node_name), + rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name), {"method": "release_ip", - "args" : {"fixed_ip": ip}}) + "args": {"fixed_ip": ip}}) + def init_leases(interface): + """Get the list of hosts for an interface.""" net = model.get_network_by_interface(interface) res = "" for host_name in net.hosts: - res += "%s\n" % linux_net.hostDHCP(net, host_name, net.hosts[host_name]) + res += "%s\n" % linux_net.hostDHCP(net, host_name, + net.hosts[host_name]) return res def main(): + """Parse environment and arguments and call the approproate action.""" flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile) utils.default_flagfile(flagfile) argv = FLAGS(sys.argv) @@ -79,18 +86,19 @@ def main(): FLAGS.redis_db = 8 FLAGS.network_size = 32 FLAGS.connection_type = 'fake' - 
FLAGS.fake_network=True - FLAGS.auth_driver='nova.auth.ldapdriver.FakeLdapDriver' + FLAGS.fake_network = True + FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' action = argv[1] - if action in ['add','del','old']: + if action in ['add', 'del', 'old']: mac = argv[2] ip = argv[3] hostname = argv[4] - logging.debug("Called %s for mac %s with ip %s and hostname %s on interface %s" % (action, mac, ip, hostname, interface)) - globals()[action+'_lease'](mac, ip, hostname, interface) + logging.debug("Called %s for mac %s with ip %s and " + "hostname %s on interface %s", + action, mac, ip, hostname, interface) + globals()[action + '_lease'](mac, ip, hostname, interface) else: print init_leases(interface) - exit(0) if __name__ == "__main__": - sys.exit(main()) + main() diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore index 2e79f09b70..5165109b2f 100755 --- a/bin/nova-import-canonical-imagestore +++ b/bin/nova-import-canonical-imagestore @@ -37,20 +37,17 @@ FLAGS = flags.FLAGS api_url = 'https://imagestore.canonical.com/api/dashboard' -image_cache = None -def images(): - global image_cache - if not image_cache: - try: - images = json.load(urllib2.urlopen(api_url))['images'] - image_cache = [i for i in images if i['title'].find('amd64') > -1] - except Exception: - print 'unable to download canonical image list' - sys.exit(1) - return image_cache -# FIXME(ja): add checksum/signature checks +def get_images(): + """Get a list of the images from the imagestore URL.""" + images = json.load(urllib2.urlopen(api_url))['images'] + images = [img for img in images if img['title'].find('amd64') > -1] + return images + + def download(img): + """Download an image to the local filesystem.""" + # FIXME(ja): add checksum/signature checks tempdir = tempfile.mkdtemp(prefix='cis-') kernel_id = None @@ -79,20 +76,22 @@ def download(img): shutil.rmtree(tempdir) + def main(): + """Main entry point.""" utils.default_flagfile() argv = FLAGS(sys.argv) + 
images = get_images() if len(argv) == 2: - for img in images(): + for img in images: if argv[1] == 'all' or argv[1] == img['title']: download(img) else: print 'usage: %s (title|all)' print 'available images:' - for image in images(): - print image['title'] + for img in images: + print img['title'] if __name__ == '__main__': main() - diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor index b195089b74..911fb6f422 100755 --- a/bin/nova-instancemonitor +++ b/bin/nova-instancemonitor @@ -22,7 +22,6 @@ """ import logging -from twisted.internet import task from twisted.application import service from nova import twistd @@ -30,7 +29,11 @@ from nova.compute import monitor logging.getLogger('boto').setLevel(logging.WARN) -def main(): + +if __name__ == '__main__': + twistd.serve(__file__) + +if __name__ == '__builtin__': logging.warn('Starting instance monitor') m = monitor.InstanceMonitor() @@ -38,14 +41,3 @@ def main(): # parses this file, return it so that we can get it into globals below application = service.Application('nova-instancemonitor') m.setServiceParent(application) - return application - -if __name__ == '__main__': - twistd.serve(__file__) - -if __name__ == '__builtin__': - application = main() - - - - diff --git a/bin/nova-manage b/bin/nova-manage index 7835c7a77c..2dd569df0c 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -37,12 +37,15 @@ FLAGS = flags.FLAGS class VpnCommands(object): + """Class for managing VPNs.""" + def __init__(self): self.manager = manager.AuthManager() self.instdir = model.InstanceDirectory() self.pipe = pipelib.CloudPipe(cloud.CloudController()) def list(self): + """Print a listing of the VPNs for all projects.""" print "%-12s\t" % 'project', print "%-12s\t" % 'ip:port', print "%s" % 'state' @@ -50,9 +53,10 @@ class VpnCommands(object): print "%-12s\t" % project.name, print "%s:%s\t" % (project.vpn_ip, project.vpn_port), - vpn = self.__vpn_for(project.id) + vpn = self._vpn_for(project.id) if vpn: - out, err = 
utils.execute("ping -c1 -w1 %s > /dev/null; echo $?" % vpn['private_dns_name']) + command = "ping -c1 -w1 %s > /dev/null; echo $?" + out, _err = utils.execute(command % vpn['private_dns_name']) if out.strip() == '0': net = 'up' else: @@ -66,25 +70,32 @@ class VpnCommands(object): else: print None - def __vpn_for(self, project_id): + def _vpn_for(self, project_id): + """Get the VPN instance for a project ID.""" for instance in self.instdir.all: - if (instance.state.has_key('image_id') + if ('image_id' in instance.state and instance['image_id'] == FLAGS.vpn_image_id - and not instance['state_description'] in ['shutting_down', 'shutdown'] + and not instance['state_description'] in + ['shutting_down', 'shutdown'] and instance['project_id'] == project_id): return instance def spawn(self): + """Run all VPNs.""" for p in reversed(self.manager.get_projects()): - if not self.__vpn_for(p.id): - print 'spawning %s' % p.id - self.pipe.launch_vpn_instance(p.id) - time.sleep(10) + if not self._vpn_for(p.id): + print 'spawning %s' % p.id + self.pipe.launch_vpn_instance(p.id) + time.sleep(10) def run(self, project_id): + """Start the VPN for a given project.""" self.pipe.launch_vpn_instance(project_id) + class RoleCommands(object): + """Class for managing roles.""" + def __init__(self): self.manager = manager.AuthManager() @@ -107,25 +118,24 @@ class RoleCommands(object): arguments: user, role [project]""" self.manager.remove_role(user, role, project) + class UserCommands(object): + """Class for managing users.""" + def __init__(self): self.manager = manager.AuthManager() - def __print_export(self, user): - print 'export EC2_ACCESS_KEY=%s' % user.access - print 'export EC2_SECRET_KEY=%s' % user.secret - def admin(self, name, access=None, secret=None): """creates a new admin and prints exports arguments: name [access] [secret]""" user = self.manager.create_user(name, access, secret, True) - self.__print_export(user) + print_export(user) def create(self, name, access=None, 
secret=None): """creates a new user and prints exports arguments: name [access] [secret]""" user = self.manager.create_user(name, access, secret, False) - self.__print_export(user) + print_export(user) def delete(self, name): """deletes an existing user @@ -137,7 +147,7 @@ class UserCommands(object): arguments: name""" user = self.manager.get_user(name) if user: - self.__print_export(user) + print_export(user) else: print "User %s doesn't exist" % name @@ -147,53 +157,58 @@ class UserCommands(object): for user in self.manager.get_users(): print user.name + +def print_export(user): + """Print export variables to use with API.""" + print 'export EC2_ACCESS_KEY=%s' % user.access + print 'export EC2_SECRET_KEY=%s' % user.secret + + class ProjectCommands(object): + """Class for managing projects.""" + def __init__(self): self.manager = manager.AuthManager() def add(self, project, user): - """adds user to project + """Adds user to project arguments: project user""" self.manager.add_to_project(user, project) def create(self, name, project_manager, description=None): - """creates a new project + """Creates a new project arguments: name project_manager [description]""" - user = self.manager.create_project(name, project_manager, description) + self.manager.create_project(name, project_manager, description) def delete(self, name): - """deletes an existing project + """Deletes an existing project arguments: name""" self.manager.delete_project(name) def environment(self, project_id, user_id, filename='novarc'): - """exports environment variables to an sourcable file + """Exports environment variables to an sourcable file arguments: project_id user_id [filename='novarc]""" rc = self.manager.get_environment_rc(project_id, user_id) with open(filename, 'w') as f: f.write(rc) def list(self): - """lists all projects + """Lists all projects arguments: """ for project in self.manager.get_projects(): print project.name def remove(self, project, user): - """removes user from project + 
"""Removes user from project arguments: project user""" self.manager.remove_from_project(user, project) - def zip(self, project_id, user_id, filename='nova.zip'): - """exports credentials for project to a zip file + def create_zip(self, project_id, user_id, filename='nova.zip'): + """Exports credentials for project to a zip file arguments: project_id user_id [filename='nova.zip]""" - zip = self.manager.get_credentials(project_id, user_id) + zip_file = self.manager.get_credentials(project_id, user_id) with open(filename, 'w') as f: - f.write(zip) - - -def usage(script_name): - print script_name + " category action []" + f.write(zip_file) categories = [ @@ -205,62 +220,61 @@ categories = [ def lazy_match(name, key_value_tuples): - """finds all objects that have a key that case insensitively contains [name] - key_value_tuples is a list of tuples of the form (key, value) + """Finds all objects that have a key that case insensitively contains + [name] key_value_tuples is a list of tuples of the form (key, value) returns a list of tuples of the form (key, value)""" - return [(k, v) for (k, v) in key_value_tuples if k.lower().find(name.lower()) == 0] + result = [] + for (k, v) in key_value_tuples: + if k.lower().find(name.lower()) == 0: + result.append((k, v)) + if len(result) == 0: + print "%s does not match any options:" % name + for k, _v in key_value_tuples: + print "\t%s" % k + sys.exit(2) + if len(result) > 1: + print "%s matched multiple options:" % name + for k, _v in result: + print "\t%s" % k + sys.exit(2) + return result def methods_of(obj): - """get all callable methods of an object that don't start with underscore + """Get all callable methods of an object that don't start with underscore returns a list of tuples of the form (method_name, method)""" - return [(i, getattr(obj, i)) for i in dir(obj) if callable(getattr(obj, i)) and not i.startswith('_')] + result = [] + for i in dir(obj): + if callable(getattr(obj, i)) and not i.startswith('_'): + 
result.append((i, getattr(obj, i))) + return result -if __name__ == '__main__': +def main(): + """Parse options and call the appropriate class/method.""" utils.default_flagfile('/etc/nova/nova-manage.conf') argv = FLAGS(sys.argv) script_name = argv.pop(0) if len(argv) < 1: - usage(script_name) + print script_name + " category action []" print "Available categories:" - for k, v in categories: + for k, _ in categories: print "\t%s" % k sys.exit(2) category = argv.pop(0) matches = lazy_match(category, categories) - if len(matches) == 0: - print "%s does not match any categories:" % category - for k, v in categories: - print "\t%s" % k - sys.exit(2) - if len(matches) > 1: - print "%s matched multiple categories:" % category - for k, v in matches: - print "\t%s" % k - sys.exit(2) # instantiate the command group object category, fn = matches[0] command_object = fn() actions = methods_of(command_object) if len(argv) < 1: - usage(script_name) + print script_name + " category action []" print "Available actions for %s category:" % category - for k, v in actions: + for k, _v in actions: print "\t%s" % k sys.exit(2) action = argv.pop(0) matches = lazy_match(action, actions) - if len(matches) == 0: - print "%s does not match any actions" % action - for k, v in actions: - print "\t%s" % k - sys.exit(2) - if len(matches) > 1: - print "%s matched multiple actions:" % action - for k, v in matches: - print "\t%s" % k - sys.exit(2) action, fn = matches[0] # call the action with the remaining arguments try: @@ -271,3 +285,5 @@ if __name__ == '__main__': print "%s %s: %s" % (category, action, fn.__doc__) sys.exit(2) +if __name__ == '__main__': + main() diff --git a/bin/nova-objectstore b/bin/nova-objectstore index c0fa815c05..02f2bcb48d 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -30,15 +30,9 @@ from nova.objectstore import handler FLAGS = flags.FLAGS -def main(): - app = handler.get_application() - print app - return app - -# NOTE(soren): Stolen from nova-compute 
if __name__ == '__main__': twistd.serve(__file__) if __name__ == '__builtin__': utils.default_flagfile() - application = main() + application = handler.get_application() diff --git a/pylintrc b/pylintrc index a853e5bed2..258d3556d8 100644 --- a/pylintrc +++ b/pylintrc @@ -1,3 +1,6 @@ +[Messages Control] +disable-msg=C0103 + [Basic] method-rgx=[a-z_][a-z0-9_]{2,50}$ diff --git a/run_tests.py b/run_tests.py index 5a8966f026..f62088253d 100644 --- a/run_tests.py +++ b/run_tests.py @@ -68,7 +68,8 @@ flags.DEFINE_bool('flush_db', True, 'Flush the database before running fake tests') flags.DEFINE_string('tests_stderr', 'run_tests.err.log', - 'Path to where to pipe STDERR during test runs. Default = "run_tests.err.log"') + 'Path to where to pipe STDERR during test runs. ' + 'Default = "run_tests.err.log"') if __name__ == '__main__': OptionsClass = twistd.WrapTwistedOptions(trial_script.Options) From 86a7e62f0b72763088b0a34516ffc30f22ca937e Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 09:49:47 -0700 Subject: [PATCH 023/101] adding pep8 and pylint for regular cleanup tasks --- tools/pip-requires | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b0..24aefb25e5 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -1,3 +1,5 @@ +pep8==0.5.0 +pylint==0.21.1 IPy==0.70 M2Crypto==0.20.2 amqplib==0.6.1 From abd9bed8f7f88617c0a402faef47da13963ccea7 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 09:50:22 -0700 Subject: [PATCH 024/101] attempting some cleanup work --- nova/endpoint/cloud.py | 77 ++++++++++++++++++++++-------------------- 1 file changed, 40 insertions(+), 37 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 878d54a15a..ee22863a91 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -47,6 +47,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + def _gen_key(user_id, key_name): """ Tuck 
this into AuthManager """ try: @@ -102,15 +103,16 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], + INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: result[instance['key_name']] = [line] return result - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) + def get_metadata(self, ipaddress): + i = self.get_instance_by_ip(ipaddress) if i is None: return None mpi = self._get_mpi_data(i['project_id']) @@ -147,7 +149,7 @@ class CloudController(object): }, 'public-hostname': i.get('dns_name', ''), 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, + 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), 'reservation-id': i['reservation_id'], 'security-groups': i.get('groups', ''), @@ -203,7 +205,7 @@ class CloudController(object): 'keyFingerprint': key_pair.fingerprint, }) - return { 'keypairsSet': result } + return {'keypairsSet': result} @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): @@ -232,7 +234,7 @@ class CloudController(object): @rbac.allow('all') def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } + groups = {'securityGroupSet': []} # Stubbed for now to unblock other things. 
return groups @@ -251,7 +253,7 @@ class CloudController(object): instance = self._get_instance(context, instance_id[0]) return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) + "args": {"instance_id": instance_id[0]}}) def _get_user_id(self, context): if context and context.user: @@ -285,10 +287,10 @@ class CloudController(object): if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] + 'device': volume['mountpoint'], + 'instanceId': volume['instance_id'], + 'status': 'attached', + 'volume_id': volume['volume_id']}] else: v['attachmentSet'] = [{}] return v @@ -298,7 +300,7 @@ class CloudController(object): def create_volume(self, context, size, **kwargs): # TODO(vish): refactor this to create the volume object here and tell service to create it result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, + "args": {"size": size, "user_id": context.user.id, "project_id": context.project.id}}) # NOTE(vish): rpc returned value is in the result key in the dictionary @@ -348,15 +350,15 @@ class CloudController(object): compute_node = instance['node_name'] rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + return defer.succeed({'attachTime': volume['attach_time'], 
+ 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) @rbac.allow('projectmanager', 'sysadmin') @@ -372,18 +374,18 @@ class CloudController(object): instance = self._get_instance(context, instance_id) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "detach_volume", - "args" : {"instance_id": instance_id, + "args": {"instance_id": instance_id, "volume_id": volume_id}}) except exception.NotFound: # If the instance doesn't exist anymore, # then we need to call detach blind volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + return defer.succeed({'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -425,7 +427,8 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) + instance.get('project_id', None), + instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') i['instance_type'] = instance.get('instance_type', None) @@ -442,7 +445,7 @@ class CloudController(object): reservations[res_id] = r reservations[res_id]['instances_set'].append(i) - instance_response = {'reservationSet' : list(reservations.values()) } + instance_response = {'reservationSet': list(reservations.values())} return instance_response @rbac.allow('all') @@ -457,7 +460,7 @@ class CloudController(object): address['project_id'] 
== context.project.id): address_rv = { 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') + 'instance_id': address.get('instance_id', 'free') } if context.user.is_admin(): address_rv['instance_id'] = "%s (%s, %s)" % ( @@ -477,7 +480,7 @@ class CloudController(object): "args": {"user_id": context.user.id, "project_id": context.project.id}}) public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @defer.inlineCallbacks @@ -591,7 +594,7 @@ class CloudController(object): inst.save() rpc.cast(FLAGS.compute_topic, {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) + "args": {"instance_id": inst.instance_id}}) logging.debug("Casting to node for %s's instance with IP of %s" % (context.user.name, inst['private_dns_name'])) # TODO: Make Network figure out the network name from ip. @@ -646,7 +649,7 @@ class CloudController(object): instance = self._get_instance(context, i) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "reboot_instance", - "args" : {"instance_id": i}}) + "args": {"instance_id": i}}) return defer.succeed(True) @rbac.allow('projectmanager', 'sysadmin') @@ -656,7 +659,7 @@ class CloudController(object): volume_node = volume['node_name'] rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) + "args": {"volume_id": volume_id}}) return defer.succeed(True) @rbac.allow('all') @@ -689,9 +692,9 @@ class CloudController(object): image = images.list(context, image_id)[0] except IndexError: raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } + result = {'image_id': image_id, 'launchPermission': []} if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) + result['launchPermission'].append({'group': 'all'}) 
return defer.succeed(result) @rbac.allow('projectmanager', 'sysadmin') From e59b769cf1ad12f63788d2e90fd3a4412f9db6f4 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 11:39:14 -0700 Subject: [PATCH 025/101] variable name cleanup --- nova/endpoint/cloud.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index ee22863a91..8b937306e1 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -210,18 +210,18 @@ class CloudController(object): @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') + dcall = defer.Deferred() + pool = context.handler.application.settings.get('pool') def _complete(kwargs): if 'exception' in kwargs: - d.errback(kwargs['exception']) + dcall.errback(kwargs['exception']) return - d.callback({'keyName': key_name, + dcall.callback({'keyName': key_name, 'keyFingerprint': kwargs['fingerprint'], 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], + pool.apply_async(_gen_key, [context.user.id, key_name], callback=_complete) - return d + return dcall except manager.UserError as e: raise From 3fe167e1e398b3d602699b8219dcbfc8fec86859 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 11:40:03 -0700 Subject: [PATCH 026/101] removing what appears to be an unused try/except statement - nova.auth.manager.UserError doesn't exist in this codebase. Leftover? Something intended to be there but never added? 
--- nova/endpoint/cloud.py | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 8b937306e1..ad9188ff3d 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -209,22 +209,18 @@ class CloudController(object): @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): - try: - dcall = defer.Deferred() - pool = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - dcall.errback(kwargs['exception']) - return - dcall.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - pool.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return dcall - - except manager.UserError as e: - raise + dcall = defer.Deferred() + pool = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + dcall.errback(kwargs['exception']) + return + dcall.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + pool.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return dcall @rbac.allow('all') def delete_key_pair(self, context, key_name, **kwargs): From 86150042191005a9bf04ef243396667cb9dad1b0 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 13:20:50 -0700 Subject: [PATCH 027/101] convention and variable naming cleanup for pylint/pep8 --- nova/network/model.py | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index daac035e42..eada776c76 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -97,11 +97,11 @@ class Vlan(datastore.BasicModel): def dict_by_vlan(cls): """a hash of vlan:project""" set_name = cls._redis_set_name(cls.__name__) - rv = {} - h = 
datastore.Redis.instance().hgetall(set_name) - for v in h.keys(): - rv[h[v]] = v - return rv + retvals = {} + hashset = datastore.Redis.instance().hgetall(set_name) + for val in hashset.keys(): + retvals[hashset[val]] = val + return retvals @classmethod @datastore.absorb_connection_error @@ -136,7 +136,8 @@ class Vlan(datastore.BasicModel): # CLEANUP: # TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients -# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win? +# TODO(ja): does vlanpool "keeper" need to know the min/max - +# shouldn't FLAGS always win? # TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients class BaseNetwork(datastore.BasicModel): @@ -217,7 +218,9 @@ class BaseNetwork(datastore.BasicModel): def available(self): # the .2 address is always CloudPipe # and the top are for vpn clients - for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)): + num_ips = self.num_static_ips + num_clients = FLAGS.cnt_vpn_clients + for idx in range(num_ips, len(self.network)-(1 + num_clients)): address = str(self.network[idx]) if not address in self.hosts.keys(): yield address @@ -338,8 +341,9 @@ class DHCPNetwork(BridgedNetwork): private_ip = str(self.network[2]) linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT" % (private_ip, )) - linux_net.confirm_rule("PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" - % (self.project.vpn_ip, self.project.vpn_port, private_ip)) + linux_net.confirm_rule( + "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" + % (self.project.vpn_ip, self.project.vpn_port, private_ip)) def deexpress(self, address=None): # if this is the last address, stop dns @@ -374,13 +378,14 @@ class PublicAddress(datastore.BasicModel): return addr -DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)] +DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] class 
PublicNetworkController(BaseNetwork): override_type = 'network' def __init__(self, *args, **kwargs): network_id = "public:default" - super(PublicNetworkController, self).__init__(network_id, FLAGS.public_range) + super(PublicNetworkController, self).__init__(network_id, + FLAGS.public_range) self['user_id'] = "public" self['project_id'] = "public" self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) @@ -415,7 +420,7 @@ class PublicNetworkController(BaseNetwork): def deallocate_ip(self, ip_str): # NOTE(vish): cleanup is now done on release by the parent class - self.release_ip(ip_str) + self.release_ip(ip_str) def associate_address(self, public_ip, private_ip, instance_id): if not public_ip in self.assigned: @@ -461,8 +466,9 @@ class PublicNetworkController(BaseNetwork): linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT" % (private_ip)) for (protocol, port) in DEFAULT_PORTS: - linux_net.confirm_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT" - % (private_ip, protocol, port)) + linux_net.confirm_rule( + "FORWARD -d %s -p %s --dport %s -j ACCEPT" + % (private_ip, protocol, port)) def deexpress(self, address=None): addr = self.get_host(address) From 21c1d379199c528024c5e85571609e77e53c6ee7 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 8 Aug 2010 13:31:40 -0700 Subject: [PATCH 028/101] light cleanup - convention stuff mostly --- nova/auth/manager.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index d44ed52b20..e5efbca246 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -29,15 +29,17 @@ import uuid import zipfile from nova import crypto -from nova import datastore from nova import exception from nova import flags -from nova import objectstore # for flags from nova import utils -from nova.auth import ldapdriver # for flags from nova.auth import signer from nova.network import vpn +#unused imports +#from nova import datastore +#from nova.auth 
import ldapdriver # for flags +#from nova import objectstore # for flags + FLAGS = flags.FLAGS # NOTE(vish): a user with one of these roles will be a superuser and @@ -99,6 +101,7 @@ class AuthBase(object): class User(AuthBase): """Object representing a user""" def __init__(self, id, name, access, secret, admin): + AuthBase.__init__(self) self.id = id self.name = name self.access = access @@ -159,6 +162,7 @@ class KeyPair(AuthBase): fingerprint is stored. The user's private key is not saved. """ def __init__(self, id, name, owner_id, public_key, fingerprint): + AuthBase.__init__(self) self.id = id self.name = name self.owner_id = owner_id @@ -176,6 +180,7 @@ class KeyPair(AuthBase): class Project(AuthBase): """Represents a Project returned from the datastore""" def __init__(self, id, name, project_manager_id, description, member_ids): + AuthBase.__init__(self) self.id = id self.name = name self.project_manager_id = project_manager_id @@ -234,7 +239,7 @@ class AuthManager(object): AuthManager also manages associated data related to Auth objects that need to be more accessible, such as vpn ips and ports. """ - _instance=None + _instance = None def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" if not cls._instance: @@ -248,7 +253,7 @@ class AuthManager(object): reset the driver if it is not set or a new driver is specified. """ if driver or not getattr(self, 'driver', None): - self.driver = utils.import_class(driver or FLAGS.auth_driver) + self.driver = utils.import_class(driver or FLAGS.auth_driver) def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', From d1977a820db3dad7e907e976c5502ffd37e1b719 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Mon, 9 Aug 2010 13:23:19 +0100 Subject: [PATCH 029/101] Move the xenapi top level directory under plugins, as suggested by Jay Pipes. 
--- {xenapi => plugins/xenapi}/README | 0 {xenapi => plugins/xenapi}/etc/xapi.d/plugins/objectstore | 0 {xenapi => plugins/xenapi}/etc/xapi.d/plugins/pluginlib_nova.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename {xenapi => plugins/xenapi}/README (100%) rename {xenapi => plugins/xenapi}/etc/xapi.d/plugins/objectstore (100%) rename {xenapi => plugins/xenapi}/etc/xapi.d/plugins/pluginlib_nova.py (100%) diff --git a/xenapi/README b/plugins/xenapi/README similarity index 100% rename from xenapi/README rename to plugins/xenapi/README diff --git a/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenapi/etc/xapi.d/plugins/objectstore similarity index 100% rename from xenapi/etc/xapi.d/plugins/objectstore rename to plugins/xenapi/etc/xapi.d/plugins/objectstore diff --git a/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py similarity index 100% rename from xenapi/etc/xapi.d/plugins/pluginlib_nova.py rename to plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py From a31fe618b94f87cf03a090db04dace732c58951c Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Mon, 9 Aug 2010 09:47:08 -0400 Subject: [PATCH 030/101] pylint fixes for /nova/test.py --- nova/test.py | 74 +++++++++++++++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 33 deletions(-) diff --git a/nova/test.py b/nova/test.py index 6fbcab5e41..820cdda567 100644 --- a/nova/test.py +++ b/nova/test.py @@ -1,4 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +# pylint: disable-msg=C0103 +# pylint: disable-msg=W0511 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -22,15 +24,14 @@ Allows overriding of flags for use of fakes, and some black magic for inline callbacks. 
""" -import logging import mox import stubout +import sys import time -import unittest + from tornado import ioloop from twisted.internet import defer -from twisted.python import failure -from twisted.trial import unittest as trial_unittest +from twisted.trial import unittest from nova import fakerabbit from nova import flags @@ -41,20 +42,21 @@ flags.DEFINE_bool('fake_tests', True, 'should we use everything for testing') -def skip_if_fake(f): +def skip_if_fake(func): + """Decorator that skips a test if running in fake mode""" def _skipper(*args, **kw): + """Wrapped skipper function""" if FLAGS.fake_tests: - raise trial_unittest.SkipTest('Test cannot be run in fake mode') + raise unittest.SkipTest('Test cannot be run in fake mode') else: - return f(*args, **kw) - - _skipper.func_name = f.func_name + return func(*args, **kw) return _skipper -class TrialTestCase(trial_unittest.TestCase): - +class TrialTestCase(unittest.TestCase): + """Test case base class for all unit tests""" def setUp(self): + """Run before each test method to initialize test environment""" super(TrialTestCase, self).setUp() # emulate some of the mox stuff, we can't use the metaclass @@ -64,6 +66,7 @@ class TrialTestCase(trial_unittest.TestCase): self.flag_overrides = {} def tearDown(self): + """Runs after each test method to finalize/tear down test environment""" super(TrialTestCase, self).tearDown() self.reset_flags() self.mox.UnsetStubs() @@ -75,6 +78,7 @@ class TrialTestCase(trial_unittest.TestCase): fakerabbit.reset_all() def flags(self, **kw): + """Override flag variables for a test""" for k, v in kw.iteritems(): if k in self.flag_overrides: self.reset_flags() @@ -84,13 +88,17 @@ class TrialTestCase(trial_unittest.TestCase): setattr(FLAGS, k, v) def reset_flags(self): + """Resets all flag variables for the test. 
Runs after each test""" for k, v in self.flag_overrides.iteritems(): setattr(FLAGS, k, v) class BaseTestCase(TrialTestCase): - def setUp(self): + # TODO(jaypipes): Can this be moved into the TrialTestCase class? + """Base test case class for all unit tests.""" + def setUp(self): # pylint: disable-msg=W0511 + """Run before each test method to initialize test environment""" super(BaseTestCase, self).setUp() # TODO(termie): we could possibly keep a more global registry of # the injected listeners... this is fine for now though @@ -98,33 +106,27 @@ class BaseTestCase(TrialTestCase): self.ioloop = ioloop.IOLoop.instance() self._waiting = None - self._doneWaiting = False - self._timedOut = False - self.set_up() - - def set_up(self): - pass - - def tear_down(self): - pass + self._done_waiting = False + self._timed_out = False def tearDown(self): + """Runs after each test method to finalize/tear down test environment""" super(BaseTestCase, self).tearDown() for x in self.injected: x.stop() if FLAGS.fake_rabbit: fakerabbit.reset_all() - self.tear_down() def _waitForTest(self, timeout=60): """ Push the ioloop along to wait for our test to complete. """ self._waiting = self.ioloop.add_timeout(time.time() + timeout, self._timeout) def _wait(): - if self._timedOut: + """Wrapped wait function. Called on timeout.""" + if self._timed_out: self.fail('test timed out') self._done() - if self._doneWaiting: + if self._done_waiting: self.ioloop.stop() return # we can use add_callback here but this uses less cpu when testing @@ -134,13 +136,16 @@ class BaseTestCase(TrialTestCase): self.ioloop.start() def _done(self): + """Callback used for cleaning up deferred test methods.""" if self._waiting: try: self.ioloop.remove_timeout(self._waiting) - except Exception: + except Exception: # pylint: disable-msg=W0703 + # TODO(jaypipes): This produces a pylint warning. Should + # we really be catching Exception and then passing here? 
pass self._waiting = None - self._doneWaiting = True + self._done_waiting = True def _maybeInlineCallbacks(self, f): """ If we're doing async calls in our tests, wait on them. @@ -189,6 +194,7 @@ class BaseTestCase(TrialTestCase): return d def _catchExceptions(self, result, failure): + """Catches all exceptions and handles keyboard interrupts.""" exc = (failure.type, failure.value, failure.getTracebackObject()) if isinstance(failure.value, self.failureException): result.addFailure(self, exc) @@ -200,11 +206,12 @@ class BaseTestCase(TrialTestCase): self._done() def _timeout(self): + """Helper method which trips the timeouts""" self._waiting = False - self._timedOut = True + self._timed_out = True def run(self, result=None): - if result is None: result = self.defaultTestResult() + """Runs the test case""" result.startTest(self) testMethod = getattr(self, self._testMethodName) @@ -214,7 +221,7 @@ class BaseTestCase(TrialTestCase): except KeyboardInterrupt: raise except: - result.addError(self, self._exc_info()) + result.addError(self, sys.exc_info()) return ok = False @@ -225,19 +232,20 @@ class BaseTestCase(TrialTestCase): self._waitForTest() ok = True except self.failureException: - result.addFailure(self, self._exc_info()) + result.addFailure(self, sys.exc_info()) except KeyboardInterrupt: raise except: - result.addError(self, self._exc_info()) + result.addError(self, sys.exc_info()) try: self.tearDown() except KeyboardInterrupt: raise except: - result.addError(self, self._exc_info()) + result.addError(self, sys.exc_info()) ok = False - if ok: result.addSuccess(self) + if ok: + result.addSuccess(self) finally: result.stopTest(self) From 850acbdd9521cda8600235259fa68b8f2ab488ef Mon Sep 17 00:00:00 2001 From: Eric Day Date: Mon, 9 Aug 2010 07:31:41 -0700 Subject: [PATCH 031/101] Fixed docstring format per Jay's review. 
--- nova/endpoint/rackspace.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index f6735a2605..75b828e91d 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -105,7 +105,7 @@ class CloudServerAPI(wsgi.Application): return json.dumps(value) def instance_details(self, inst): # pylint: disable-msg=R0201 - "Build the data structure to represent details for an instance." + """Build the data structure to represent details for an instance.""" return { "id": inst.get("instance_id", None), "imageId": inst.get("image_id", None), From 2a069abf622029c3f3e7273ad1cc3fb17b529e63 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Mon, 9 Aug 2010 10:46:33 -0400 Subject: [PATCH 032/101] pylint fixes for nova/server.py --- nova/server.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/nova/server.py b/nova/server.py index 7a1901a2f4..96550f0782 100644 --- a/nova/server.py +++ b/nova/server.py @@ -52,13 +52,8 @@ def stop(pidfile): """ # Get the pid from the pidfile try: - pf = file(pidfile,'r') - pid = int(pf.read().strip()) - pf.close() + pid = int(open(pidfile,'r').read().strip()) except IOError: - pid = None - - if not pid: message = "pidfile %s does not exist. 
Daemon not running?\n" sys.stderr.write(message % pidfile) return # not an error in a restart @@ -79,14 +74,15 @@ def stop(pidfile): def serve(name, main): + """Controller for server""" argv = FLAGS(sys.argv) if not FLAGS.pidfile: FLAGS.pidfile = '%s.pid' % name - logging.debug("Full set of FLAGS: \n\n\n" ) + logging.debug("Full set of FLAGS: \n\n\n") for flag in FLAGS: - logging.debug("%s : %s" % (flag, FLAGS.get(flag, None) )) + logging.debug("%s : %s", flag, FLAGS.get(flag, None)) action = 'start' if len(argv) > 1: @@ -102,7 +98,11 @@ def serve(name, main): else: print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) + daemonize(argv, name, main) + +def daemonize(args, name, main): + """Does the work of daemonizing the process""" logging.getLogger('amqplib').setLevel(logging.WARN) if FLAGS.daemonize: logger = logging.getLogger() @@ -115,7 +115,7 @@ def serve(name, main): else: if not FLAGS.logfile: FLAGS.logfile = '%s.log' % name - logfile = logging.handlers.FileHandler(FLAGS.logfile) + logfile = logging.FileHandler(FLAGS.logfile) logfile.setFormatter(formatter) logger.addHandler(logfile) stdin, stdout, stderr = None, None, None @@ -137,4 +137,4 @@ def serve(name, main): stdout=stdout, stderr=stderr ): - main(argv) + main(args) From 948162e3bdd96cdbe5db9a0c25722ac63c04e264 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Mon, 9 Aug 2010 12:20:47 -0400 Subject: [PATCH 033/101] Disables warning about TODO in code comments in pylintrc --- nova/test.py | 1 - pylintrc | 4 ++++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/test.py b/nova/test.py index 820cdda567..966cbf5fc1 100644 --- a/nova/test.py +++ b/nova/test.py @@ -1,6 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # pylint: disable-msg=C0103 -# pylint: disable-msg=W0511 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
diff --git a/pylintrc b/pylintrc index a853e5bed2..36ec7b346b 100644 --- a/pylintrc +++ b/pylintrc @@ -1,6 +1,10 @@ [Basic] method-rgx=[a-z_][a-z0-9_]{2,50}$ +[MESSAGES CONTROL] +# TODOs in code comments are fine... +disable-msg=W0511 + [Design] max-public-methods=100 min-public-methods=0 From c6c222800ccc1203fc0edd7716dd808ec8f6bdc4 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Mon, 9 Aug 2010 12:37:15 -0400 Subject: [PATCH 034/101] Fix up variable names instead of disabling pylint naming rule. Makes variables able to be a single letter in pylintrc --- nova/test.py | 29 ++++++++++++++--------------- pylintrc | 6 ++++++ 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/nova/test.py b/nova/test.py index 966cbf5fc1..c7e08734fe 100644 --- a/nova/test.py +++ b/nova/test.py @@ -1,5 +1,4 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# pylint: disable-msg=C0103 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -54,7 +53,7 @@ def skip_if_fake(func): class TrialTestCase(unittest.TestCase): """Test case base class for all unit tests""" - def setUp(self): + def setUp(self): # pylint: disable-msg=C0103 """Run before each test method to initialize test environment""" super(TrialTestCase, self).setUp() @@ -64,7 +63,7 @@ class TrialTestCase(unittest.TestCase): self.stubs = stubout.StubOutForTesting() self.flag_overrides = {} - def tearDown(self): + def tearDown(self): # pylint: disable-msg=C0103 """Runs after each test method to finalize/tear down test environment""" super(TrialTestCase, self).tearDown() self.reset_flags() @@ -96,7 +95,7 @@ class TrialTestCase(unittest.TestCase): class BaseTestCase(TrialTestCase): # TODO(jaypipes): Can this be moved into the TrialTestCase class? 
"""Base test case class for all unit tests.""" - def setUp(self): # pylint: disable-msg=W0511 + def setUp(self): # pylint: disable-msg=C0103 """Run before each test method to initialize test environment""" super(BaseTestCase, self).setUp() # TODO(termie): we could possibly keep a more global registry of @@ -108,7 +107,7 @@ class BaseTestCase(TrialTestCase): self._done_waiting = False self._timed_out = False - def tearDown(self): + def tearDown(self):# pylint: disable-msg=C0103 """Runs after each test method to finalize/tear down test environment""" super(BaseTestCase, self).tearDown() for x in self.injected: @@ -116,7 +115,7 @@ class BaseTestCase(TrialTestCase): if FLAGS.fake_rabbit: fakerabbit.reset_all() - def _waitForTest(self, timeout=60): + def _wait_for_test(self, timeout=60): """ Push the ioloop along to wait for our test to complete. """ self._waiting = self.ioloop.add_timeout(time.time() + timeout, self._timeout) @@ -146,7 +145,7 @@ class BaseTestCase(TrialTestCase): self._waiting = None self._done_waiting = True - def _maybeInlineCallbacks(self, f): + def _maybe_inline_callbacks(self, func): """ If we're doing async calls in our tests, wait on them. This is probably the most complicated hunk of code we have so far. @@ -169,7 +168,7 @@ class BaseTestCase(TrialTestCase): d.addCallback(_describe) d.addCallback(_checkDescribe) d.addCallback(lambda x: self._done()) - self._waitForTest() + self._wait_for_test() Example (inline callbacks! yay!): @@ -183,16 +182,16 @@ class BaseTestCase(TrialTestCase): # TODO(termie): this can be a wrapper function instead and # and we can make a metaclass so that we don't # have to copy all that "run" code below. 
- g = f() + g = func() if not hasattr(g, 'send'): self._done() return defer.succeed(g) - inlined = defer.inlineCallbacks(f) + inlined = defer.inlineCallbacks(func) d = inlined() return d - def _catchExceptions(self, result, failure): + def _catch_exceptions(self, result, failure): """Catches all exceptions and handles keyboard interrupts.""" exc = (failure.type, failure.value, failure.getTracebackObject()) if isinstance(failure.value, self.failureException): @@ -213,7 +212,7 @@ class BaseTestCase(TrialTestCase): """Runs the test case""" result.startTest(self) - testMethod = getattr(self, self._testMethodName) + test_method = getattr(self, self._testMethodName) try: try: self.setUp() @@ -225,10 +224,10 @@ class BaseTestCase(TrialTestCase): ok = False try: - d = self._maybeInlineCallbacks(testMethod) - d.addErrback(lambda x: self._catchExceptions(result, x)) + d = self._maybe_inline_callbacks(test_method) + d.addErrback(lambda x: self._catch_exceptions(result, x)) d.addBoth(lambda x: self._done() and x) - self._waitForTest() + self._wait_for_test() ok = True except self.failureException: result.addFailure(self, sys.exc_info()) diff --git a/pylintrc b/pylintrc index 36ec7b346b..045d059390 100644 --- a/pylintrc +++ b/pylintrc @@ -1,4 +1,10 @@ [Basic] +# Variables can be 1 to 31 characters long, with +# lowercase and underscores +variable-rgx=[a-z_][a-z0-9_]{0,30}$ + +# Method names should be at least 3 characters long +# and be lowecased with underscores method-rgx=[a-z_][a-z0-9_]{2,50}$ [MESSAGES CONTROL] From 09b5be11e7b61aa0ae344cec669e4f62dd18c0b2 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 9 Aug 2010 17:45:00 -0400 Subject: [PATCH 035/101] Run correctly even if called while in tools/ directory, as 'python install_venv.py' --- tools/install_venv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index 0b35fc8e9f..96bb12efb1 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py 
@@ -7,7 +7,7 @@ import subprocess import sys -ROOT = os.path.dirname(os.path.dirname(__file__)) +ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) VENV = os.path.join(ROOT, '.nova-venv') PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires') TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' From e3f8aa57873b7de69980c57cd05e3f1bdf6f7d08 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Mon, 9 Aug 2010 23:22:59 +0100 Subject: [PATCH 036/101] Implement the same fix as lp:~vishvananda/nova/fix-curl-project, but for virt.xenapi. --- nova/virt/xenapi.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index c3e84c2b97..9fe15644f4 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -92,12 +92,13 @@ class XenAPIConnection(object): mac_address = '' user = AuthManager().get_user(instance.datamodel['user_id']) + project = AuthManager().get_project(instance.datamodel['project_id']) vdi_uuid = yield self.fetch_image( - instance.datamodel['image_id'], user, True) + instance.datamodel['image_id'], user, project, True) kernel = yield self.fetch_image( - instance.datamodel['kernel_id'], user, False) + instance.datamodel['kernel_id'], user, project, False) ramdisk = yield self.fetch_image( - instance.datamodel['ramdisk_id'], user, False) + instance.datamodel['ramdisk_id'], user, project, False) vdi_ref = yield self._conn.xenapi.VDI.get_by_uuid(vdi_uuid) vm_ref = yield self.create_vm(instance, kernel, ramdisk) @@ -195,17 +196,18 @@ class XenAPIConnection(object): raise Exception('Found no network for bridge %s' % bridge) - def fetch_image(self, image, user, use_sr): + def fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. 
The former is for VM disks, the latter for its kernel and ramdisk (if external kernels are being used).""" url = images.image_url(image) - logging.debug("Asking xapi to fetch %s as %s" % (url, user.access)) + access = AuthManager().get_access_key(user, project) + logging.debug("Asking xapi to fetch %s as %s" % (url, access)) fn = use_sr and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url - args['username'] = user.access + args['username'] = access args['password'] = user.secret if use_sr: args['add_partition'] = 'true' From bd0645153fb1b60a551c50c657a7837713da54a9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 9 Aug 2010 15:34:05 -0700 Subject: [PATCH 037/101] initial cleanup of tests for network --- nova/network/model.py | 39 ++++++------ nova/network/vpn.py | 26 +++++--- nova/tests/network_unittest.py | 106 +++++++++++++++++++++------------ 3 files changed, 107 insertions(+), 64 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index daac035e42..a70671632f 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -141,7 +141,6 @@ class Vlan(datastore.BasicModel): class BaseNetwork(datastore.BasicModel): override_type = 'network' - NUM_STATIC_IPS = 3 # Network, Gateway, and CloudPipe @property def identifier(self): @@ -215,16 +214,19 @@ class BaseNetwork(datastore.BasicModel): @property def available(self): - # the .2 address is always CloudPipe - # and the top are for vpn clients - for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)): + for idx in range(self.num_bottom_reserved_ips, + len(self.network) - self.num_top_reserved_ips): address = str(self.network[idx]) if not address in self.hosts.keys(): yield address @property - def num_static_ips(self): - return BaseNetwork.NUM_STATIC_IPS + def num_bottom_reserved_ips(self): + return 2 # Network, Gateway + + @property + def num_top_reserved_ips(self): + return 1 # Broadcast def allocate_ip(self, user_id, project_id, mac): for 
address in self.available: @@ -306,9 +308,9 @@ class DHCPNetwork(BridgedNetwork): def __init__(self, *args, **kwargs): super(DHCPNetwork, self).__init__(*args, **kwargs) # logging.debug("Initing DHCPNetwork object...") - self.dhcp_listen_address = self.network[1] - self.dhcp_range_start = self.network[3] - self.dhcp_range_end = self.network[-(1 + FLAGS.cnt_vpn_clients)] + self.dhcp_listen_address = self.gateway + self.dhcp_range_start = self.network[self.num_bottom_reserved_ips] + self.dhcp_range_end = self.network[-self.num_top_reserved_ips] try: os.makedirs(FLAGS.networks_path) # NOTE(todd): I guess this is a lazy way to not have to check if the @@ -318,6 +320,16 @@ class DHCPNetwork(BridgedNetwork): except Exception, err: pass + @property + def num_bottom_reserved_ips(self): + # For cloudpipe + return super(DHCPNetwork, self).num_bottom_reserved_ips + 1 + + @property + def num_top_reserved_ips(self): + return super(DHCPNetwork, self).num_top_reserved_ips + \ + FLAGS.cnt_vpn_clients + def express(self, address=None): super(DHCPNetwork, self).express(address=address) if len(self.assigned) > 0: @@ -388,13 +400,6 @@ class PublicNetworkController(BaseNetwork): self.save() self.express() - @property - def available(self): - for idx in range(2, len(self.network)-1): - address = str(self.network[idx]) - if not address in self.hosts.keys(): - yield address - @property def host_objs(self): for address in self.assigned: @@ -415,7 +420,7 @@ class PublicNetworkController(BaseNetwork): def deallocate_ip(self, ip_str): # NOTE(vish): cleanup is now done on release by the parent class - self.release_ip(ip_str) + self.release_ip(ip_str) def associate_address(self, public_ip, private_ip, instance_id): if not public_ip in self.assigned: diff --git a/nova/network/vpn.py b/nova/network/vpn.py index cec84287cd..1b6dd7a56c 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -74,23 +74,31 @@ class NetworkData(datastore.BasicModel): # similar to an association, but we are just 
# storing a set of values instead of keys that # should be turned into objects. - redis = datastore.Redis.instance() - key = 'ip:%s:ports' % ip - # TODO(vish): these ports should be allocated through an admin - # command instead of a flag - if (not redis.exists(key) and - not redis.exists(cls._redis_association_name('ip', ip))): - for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1): - redis.sadd(key, i) + cls._ensure_set_exists(ip) - port = redis.spop(key) + port = datastore.Redis.instance().spop(cls._redis_ports_key(ip)) if not port: raise NoMorePorts() return port + @classmethod + def _redis_ports_key(cls, ip): + return 'ip:%s:ports' % ip + + @classmethod + def _ensure_set_exists(cls, ip): + # TODO(vish): these ports should be allocated through an admin + # command instead of a flag + redis = datastore.Redis.instance() + if (not redis.exists(cls._redis_ports_key(ip)) and + not redis.exists(cls._redis_association_name('ip', ip))): + for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1): + redis.sadd(cls._redis_ports_key(ip), i) + @classmethod def num_ports_for_ip(cls, ip): """Calculates the number of free ports for a given ip""" + cls._ensure_set_exists(ip) return datastore.Redis.instance().scard('ip:%s:ports' % ip) @property diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 879ee02a47..94d10200ea 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -54,6 +54,7 @@ class NetworkTestCase(test.TrialTestCase): self.projects.append(self.manager.create_project(name, 'netuser', name)) + vpn.NetworkData.create(self.projects[i].id) self.network = model.PublicNetworkController() self.service = service.VlanNetworkService() @@ -70,7 +71,7 @@ class NetworkTestCase(test.TrialTestCase): self.assertTrue(IPy.IP(address) in self.network.network) def test_allocate_deallocate_fixed_ip(self): - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user.id, 
self.projects[0].id) address = result['private_dns_name'] mac = result['mac_address'] @@ -89,11 +90,11 @@ class NetworkTestCase(test.TrialTestCase): def test_range_allocation(self): hostname = "test-host" - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user.id, self.projects[0].id) mac = result['mac_address'] address = result['private_dns_name'] - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user, self.projects[1].id) secondmac = result['mac_address'] secondaddress = result['private_dns_name'] @@ -123,21 +124,21 @@ class NetworkTestCase(test.TrialTestCase): self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id)) def test_subnet_edge(self): - result = yield self.service.allocate_fixed_ip(self.user.id, + result = self.service.allocate_fixed_ip(self.user.id, self.projects[0].id) firstaddress = result['private_dns_name'] hostname = "toomany-hosts" for i in range(1,5): project_id = self.projects[i].id - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user, project_id) mac = result['mac_address'] address = result['private_dns_name'] - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user, project_id) mac2 = result['mac_address'] address2 = result['private_dns_name'] - result = yield self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user, project_id) mac3 = result['mac_address'] address3 = result['private_dns_name'] @@ -155,8 +156,7 @@ class NetworkTestCase(test.TrialTestCase): rv = self.service.deallocate_fixed_ip(firstaddress) self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name) - def test_212_vpn_ip_and_port_looks_valid(self): - vpn.NetworkData.create(self.projects[0].id) + def test_vpn_ip_and_port_looks_valid(self): self.assert_(self.projects[0].vpn_ip) self.assert_(self.projects[0].vpn_port >= 
FLAGS.vpn_start_port) self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port) @@ -169,55 +169,85 @@ class NetworkTestCase(test.TrialTestCase): for network_datum in vpns: network_datum.destroy() - def test_release_before_deallocate(self): - pass + def test_ips_are_reused(self): + """Makes sure that ip addresses that are deallocated get reused""" - def test_deallocate_before_issued(self): - pass + result = self.service.allocate_fixed_ip( + self.user.id, self.projects[0].id) + mac = result['mac_address'] + address = result['private_dns_name'] - def test_too_many_addresses(self): - """ - Here, we test that a proper NoMoreAddresses exception is raised. + hostname = "reuse-host" + net = model.get_project_network(self.projects[0].id, "default") - However, the number of available IP addresses depends on the test + self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) + rv = self.service.deallocate_fixed_ip(address) + self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) + + result = self.service.allocate_fixed_ip( + self.user, self.projects[0].id) + secondmac = result['mac_address'] + secondaddress = result['private_dns_name'] + self.assertEqual(address, secondaddress) + rv = self.service.deallocate_fixed_ip(secondaddress) + self.dnsmasq.issue_ip(secondmac, + secondaddress, + hostname, + net.bridge_name) + self.dnsmasq.release_ip(secondmac, + secondaddress, + hostname, + net.bridge_name) + + def test_available_ips(self): + """Make sure the number of available ips for the network is correct + + The number of available IP addresses depends on the test environment's setup. Network size is set in test fixture's setUp method. 
- There are FLAGS.cnt_vpn_clients addresses reserved for VPN (NUM_RESERVED_VPN_IPS) - - And there are NUM_STATIC_IPS that are always reserved by Nova for the necessary - services (gateway, CloudPipe, etc) - - So we should get flags.network_size - (NUM_STATIC_IPS + - NUM_PREALLOCATED_IPS + - NUM_RESERVED_VPN_IPS) - usable addresses + There are ips reserved at the bottom and top of the range. + services (network, gateway, CloudPipe, broadcast) """ net = model.get_project_network(self.projects[0].id, "default") - - # Determine expected number of available IP addresses - num_static_ips = net.num_static_ips num_preallocated_ips = len(net.hosts.keys()) - num_reserved_vpn_ips = flags.FLAGS.cnt_vpn_clients - num_available_ips = flags.FLAGS.network_size - (num_static_ips + + num_available_ips = flags.FLAGS.network_size - (net.num_bottom_reserved_ips + num_preallocated_ips + - num_reserved_vpn_ips) + net.num_top_reserved_ips) + self.assertEqual(num_available_ips, len(list(net.available))) + + def test_too_many_addresses(self): + """Test for a NoMoreAddresses exception when all fixed ips are used. 
+ """ + net = model.get_project_network(self.projects[0].id, "default") hostname = "toomany-hosts" macs = {} addresses = {} - for i in range(0, (num_available_ips - 1)): - result = yield self.service.allocate_fixed_ip(self.user.id, self.projects[0].id) + # Number of availaible ips is len of the available list + num_available_ips = len(list(net.available)) + for i in range(num_available_ips): + result = self.service.allocate_fixed_ip(self.user.id, + self.projects[0].id) macs[i] = result['mac_address'] addresses[i] = result['private_dns_name'] - self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name) + self.dnsmasq.issue_ip(macs[i], + addresses[i], + hostname, + net.bridge_name) - self.assertFailure(self.service.allocate_fixed_ip(self.user.id, self.projects[0].id), NoMoreAddresses) + self.assertEqual(len(list(net.available)), 0) + self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip, + self.user.id, self.projects[0].id) - for i in range(0, (num_available_ips - 1)): + for i in range(len(addresses)): rv = self.service.deallocate_fixed_ip(addresses[i]) - self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name) + self.dnsmasq.release_ip(macs[i], + addresses[i], + hostname, + net.bridge_name) + self.assertEqual(len(list(net.available)), num_available_ips) def is_in_project(address, project_id): return address in model.get_project_network(project_id).list_addresses() From 8990a62b0e654dcacac06246733a17fa0502bcc7 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Mon, 9 Aug 2010 17:53:10 -0700 Subject: [PATCH 038/101] fixing - removing unused imports per Eric & Jay review --- nova/auth/manager.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index e5efbca246..6d71a7ad69 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -35,10 +35,6 @@ from nova import utils from nova.auth import signer from nova.network import vpn -#unused imports -#from nova import datastore -#from 
nova.auth import ldapdriver # for flags -#from nova import objectstore # for flags FLAGS = flags.FLAGS From d8c1a74342af9af442e4ef0508fa1f66eac48bb5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 9 Aug 2010 23:02:06 -0700 Subject: [PATCH 039/101] fix releasing to work properly --- nova/network/model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index a70671632f..109ae64c7f 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -243,12 +243,12 @@ class BaseNetwork(datastore.BasicModel): def release_ip(self, ip_str): if not ip_str in self.assigned: raise exception.AddressNotAllocated() - self.deexpress(address=ip_str) self._rem_host(ip_str) + self.deexpress(address=ip_str) def deallocate_ip(self, ip_str): - # Do nothing for now, cleanup on ip release - pass + # go ahead and remove ip + self.release_ip(ip_str) def list_addresses(self): for address in self.hosts: From 8c7558ed5ae7dd0b78a91a385dbd9b044ec7c8db Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Tue, 10 Aug 2010 12:44:38 -0400 Subject: [PATCH 040/101] Changes the run_tests.sh and /tools/install_venv.py scripts to be more user-friendly and not depend on PIP while not in the virtual environment. Running run_tests.sh should now just work out of the box on all systems supporting easy_install... --- run_tests.sh | 7 +++-- tools/install_venv.py | 59 ++++++++++++++++++++++++++++++++----------- 2 files changed, 47 insertions(+), 19 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 9b2de7aea6..85d7c88341 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -6,8 +6,7 @@ with_venv=tools/with_venv.sh if [ -e ${venv} ]; then ${with_venv} python run_tests.py $@ else - echo "You need to install the Nova virtualenv before you can run this."
- echo "" - echo "Please run tools/install_venv.py" - exit 1 + echo "No virtual environment found...creating one" + python tools/install_venv.py + ${with_venv} python run_tests.py $@ fi diff --git a/tools/install_venv.py b/tools/install_venv.py index 0b35fc8e9f..adf24b365e 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -1,3 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2010 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ """ Installation script for Nova's development virtualenv """ @@ -12,15 +32,14 @@ VENV = os.path.join(ROOT, '.nova-venv') PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires') TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' - def die(message, *args): print >>sys.stderr, message % args sys.exit(1) - def run_command(cmd, redirect_output=True, error_ok=False): - # Useful for debugging: - #print >>sys.stderr, ' '.join(cmd) + """Runs a command in an out-of-process shell, returning the + output of that command + """ if redirect_output: stdout = subprocess.PIPE else: @@ -32,33 +51,43 @@ def run_command(cmd, redirect_output=True, error_ok=False): die('Command "%s" failed.\n%s', ' '.join(cmd), output) return output +HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install']).strip()) +HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv']).strip()) def check_dependencies(): - """Make sure pip and virtualenv are on the path.""" - print 'Checking for pip...', - if not run_command(['which', 'pip']).strip(): - die('ERROR: pip not found.\n\nNova development requires pip,' - ' please install it using your favorite package management tool') - print 'done.' + """Make sure virtualenv is in the path.""" print 'Checking for virtualenv...', - if not run_command(['which', 'virtualenv']).strip(): - die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' - ' please install it using your favorite package management tool') + if not HAS_VIRTUALENV: + print 'not found.' + # Try installing it via easy_install... + if HAS_EASY_INSTALL: + if not run_command(['which', 'easy_install']): + print 'Installing virtualenv via easy_install...', + die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' + ' please install it using your favorite package management tool') + print 'done.' print 'done.' 
def create_virtualenv(venv=VENV): + """Creates the virtual environment and installs PIP only into the + virtual environment + """ print 'Creating venv...', run_command(['virtualenv', '-q', '--no-site-packages', VENV]) print 'done.' + print 'Installing pip in virtualenv...', + if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip(): + die("Failed to install pip.") + print 'done.' def install_dependencies(venv=VENV): print 'Installing dependencies with pip (this can take a while)...' - run_command(['pip', 'install', '-E', venv, '-r', PIP_REQUIRES], + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES], redirect_output=False) - run_command(['pip', 'install', '-E', venv, TWISTED_NOVA], + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, TWISTED_NOVA], redirect_output=False) From f5695429db27110d8a95df3b66e4045c59d88c6a Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Tue, 10 Aug 2010 12:51:03 -0400 Subject: [PATCH 041/101] Quick fix on location of printouts when trying to install virtualenv. --- tools/install_venv.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index adf24b365e..494535b5ef 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -36,6 +36,7 @@ def die(message, *args): print >>sys.stderr, message % args sys.exit(1) + def run_command(cmd, redirect_output=True, error_ok=False): """Runs a command in an out-of-process shell, returning the output of that command @@ -51,9 +52,11 @@ def run_command(cmd, redirect_output=True, error_ok=False): die('Command "%s" failed.\n%s', ' '.join(cmd), output) return output + HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install']).strip()) HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv']).strip()) + def check_dependencies(): """Make sure virtualenv is in the path.""" @@ -62,8 +65,8 @@ def check_dependencies(): print 'not found.' 
# Try installing it via easy_install... if HAS_EASY_INSTALL: + print 'Installing virtualenv via easy_install...', if not run_command(['which', 'easy_install']): - print 'Installing virtualenv via easy_install...', die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' ' please install it using your favorite package management tool') print 'done.' From 7a1709561f1fed6e46a1c31aaa8e3ac54b9eebd3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 10:25:52 -0700 Subject: [PATCH 042/101] rename create_zip to zipfile so lazy match works --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 2dd569df0c..6af092922a 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -203,7 +203,7 @@ class ProjectCommands(object): arguments: project user""" self.manager.remove_from_project(user, project) - def create_zip(self, project_id, user_id, filename='nova.zip'): + def zipfile(self, project_id, user_id, filename='nova.zip'): """Exports credentials for project to a zip file arguments: project_id user_id [filename='nova.zip]""" zip_file = self.manager.get_credentials(project_id, user_id) From fadaf1d9842abb991b093b04c031fa9947d82fbc Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 11:48:14 -0700 Subject: [PATCH 043/101] pep8 cleanup --- nova/network/exception.py | 5 +++- nova/network/linux_net.py | 46 +++++++++++++++++++++++-------- nova/network/model.py | 39 ++++++++++++++++---------- nova/network/service.py | 13 ++++++--- nova/network/vpn.py | 2 +- nova/tests/network_unittest.py | 50 +++++++++++++++++++--------------- 6 files changed, 100 insertions(+), 55 deletions(-) diff --git a/nova/network/exception.py b/nova/network/exception.py index 5722e96729..884ea54b48 100644 --- a/nova/network/exception.py +++ b/nova/network/exception.py @@ -26,15 +26,18 @@ from nova.exception import Error class NoMoreAddresses(Error): pass + class 
AddressNotAllocated(Error): pass + class AddressAlreadyAssociated(Error): pass + class AddressNotAssociated(Error): pass + class NotValidNetworkSize(Error): pass - diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4a4b4c8a8a..35bfded490 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -23,15 +23,16 @@ import subprocess # todo(ja): does the definition of network_path belong here? +from nova import flags from nova import utils -from nova import flags -FLAGS=flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_string('dhcpbridge_flagfile', '/etc/nova/nova-dhcpbridge.conf', 'location of flagfile for dhcpbridge') + def execute(cmd, addl_env=None): if FLAGS.fake_network: logging.debug("FAKE NET: %s" % cmd) @@ -39,11 +40,13 @@ def execute(cmd, addl_env=None): else: return utils.execute(cmd, addl_env=addl_env) + def runthis(desc, cmd): if FLAGS.fake_network: return execute(cmd) else: - return utils.runthis(desc,cmd) + return utils.runthis(desc, cmd) + def Popen(cmd): if FLAGS.fake_network: @@ -56,18 +59,25 @@ def device_exists(device): (out, err) = execute("ifconfig %s" % device) return not err + def confirm_rule(cmd): execute("sudo iptables --delete %s" % (cmd)) execute("sudo iptables -I %s" % (cmd)) + def remove_rule(cmd): execute("sudo iptables --delete %s" % (cmd)) + def bind_public_ip(ip, interface): - runthis("Binding IP to interface: %s", "sudo ip addr add %s dev %s" % (ip, interface)) + runthis("Binding IP to interface: %s", + "sudo ip addr add %s dev %s" % (ip, interface)) + def unbind_public_ip(ip, interface): - runthis("Binding IP to interface: %s", "sudo ip addr del %s dev %s" % (ip, interface)) + runthis("Binding IP to interface: %s", + "sudo ip addr del %s dev %s" % (ip, interface)) + def vlan_create(net): """ create a vlan on on a bridge device unless vlan already exists """ @@ -77,6 +87,7 @@ def vlan_create(net): execute("sudo vconfig add %s %s" % (FLAGS.bridge_dev, net['vlan'])) execute("sudo ifconfig vlan%s up" % 
(net['vlan'])) + def bridge_create(net): """ create a bridge on a vlan unless it already exists """ if not device_exists(net['bridge_name']): @@ -85,14 +96,17 @@ def bridge_create(net): execute("sudo brctl setfd %s 0" % (net.bridge_name)) # execute("sudo brctl setageing %s 10" % (net.bridge_name)) execute("sudo brctl stp %s off" % (net['bridge_name'])) - execute("sudo brctl addif %s vlan%s" % (net['bridge_name'], net['vlan'])) + execute("sudo brctl addif %s vlan%s" % (net['bridge_name'], + net['vlan'])) if net.bridge_gets_ip: execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \ (net['bridge_name'], net.gateway, net.broadcast, net.netmask)) - confirm_rule("FORWARD --in-interface %s -j ACCEPT" % (net['bridge_name'])) + confirm_rule("FORWARD --in-interface %s -j ACCEPT" % + (net['bridge_name'])) else: execute("sudo ifconfig %s up" % net['bridge_name']) + def dnsmasq_cmd(net): cmd = ['sudo -E dnsmasq', ' --strict-order', @@ -107,12 +121,15 @@ def dnsmasq_cmd(net): ' --leasefile-ro'] return ''.join(cmd) + def hostDHCP(network, host, mac): - idx = host.split(".")[-1] # Logically, the idx of instances they've launched in this net + # Logically, the idx of instances they've launched in this net + idx = host.split(".")[-1] return "%s,%s-%s-%s.novalocal,%s" % \ (mac, network['user_id'], network['vlan'], idx, host) -# todo(ja): if the system has restarted or pid numbers have wrapped + +# TODO(ja): if the system has restarted or pid numbers have wrapped # then you cannot be certain that the pid refers to the # dnsmasq. As well, sending a HUP only reloads the hostfile, # so any configuration options (like dchp-range, vlan, ...) 
@@ -125,13 +142,15 @@ def start_dnsmasq(network): """ with open(dhcp_file(network['vlan'], 'conf'), 'w') as f: for host_name in network.hosts: - f.write("%s\n" % hostDHCP(network, host_name, network.hosts[host_name])) + f.write("%s\n" % hostDHCP(network, + host_name, + network.hosts[host_name])) pid = dnsmasq_pid_for(network) # if dnsmasq is already running, then tell it to reload if pid: - # todo(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers + # TODO(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers # correct dnsmasq process try: os.kill(pid, signal.SIGHUP) @@ -148,6 +167,7 @@ def start_dnsmasq(network): 'DNSMASQ_INTERFACE': network['bridge_name']} execute(dnsmasq_cmd(network), addl_env=env) + def stop_dnsmasq(network): """ stops the dnsmasq instance for a given network """ pid = dnsmasq_pid_for(network) @@ -158,14 +178,17 @@ def stop_dnsmasq(network): except Exception, e: logging.debug("Killing dnsmasq threw %s", e) + def dhcp_file(vlan, kind): """ return path to a pid, leases or conf file for a vlan """ return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind)) + def bin_file(script): return os.path.abspath(os.path.join(__file__, "../../../bin", script)) + def dnsmasq_pid_for(network): """ the pid for prior dnsmasq instance for a vlan, returns None if no pid file exists @@ -178,4 +201,3 @@ def dnsmasq_pid_for(network): if os.path.exists(pid_file): with open(pid_file, 'r') as f: return int(f.read()) - diff --git a/nova/network/model.py b/nova/network/model.py index 2074a6d46d..734a3f7a92 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -119,7 +119,9 @@ class Vlan(datastore.BasicModel): default way of saving into "vlan:ID" and adding to a set of "vlans". 
""" set_name = self._redis_set_name(self.__class__.__name__) - datastore.Redis.instance().hset(set_name, self.project_id, self.vlan_id) + datastore.Redis.instance().hset(set_name, + self.project_id, + self.vlan_id) @datastore.absorb_connection_error def destroy(self): @@ -129,17 +131,16 @@ class Vlan(datastore.BasicModel): def subnet(self): vlan = int(self.vlan_id) network = IPy.IP(FLAGS.private_range) - start = (vlan-FLAGS.vlan_start) * FLAGS.network_size + start = (vlan - FLAGS.vlan_start) * FLAGS.network_size # minus one for the gateway. return "%s-%s" % (network[start], network[start + FLAGS.network_size - 1]) + # CLEANUP: # TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients # TODO(ja): does vlanpool "keeper" need to know the min/max - # shouldn't FLAGS always win? -# TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients - class BaseNetwork(datastore.BasicModel): override_type = 'network' @@ -223,11 +224,11 @@ class BaseNetwork(datastore.BasicModel): @property def num_bottom_reserved_ips(self): - return 2 # Network, Gateway + return 2 # Network, Gateway @property def num_top_reserved_ips(self): - return 1 # Broadcast + return 1 # Broadcast def allocate_ip(self, user_id, project_id, mac): for address in self.available: @@ -257,8 +258,11 @@ class BaseNetwork(datastore.BasicModel): for address in self.hosts: yield address - def express(self, address=None): pass - def deexpress(self, address=None): pass + def express(self, address=None): + pass + + def deexpress(self, address=None): + pass class BridgedNetwork(BaseNetwork): @@ -298,6 +302,7 @@ class BridgedNetwork(BaseNetwork): linux_net.vlan_create(self) linux_net.bridge_create(self) + class DHCPNetwork(BridgedNetwork): """ properties: @@ -365,6 +370,7 @@ class DHCPNetwork(BridgedNetwork): else: linux_net.start_dnsmasq(self) + class PublicAddress(datastore.BasicModel): override_type = "address" @@ -391,6 +397,8 @@ class PublicAddress(datastore.BasicModel): 
DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] + + class PublicNetworkController(BaseNetwork): override_type = 'network' @@ -400,7 +408,8 @@ class PublicNetworkController(BaseNetwork): FLAGS.public_range) self['user_id'] = "public" self['project_id'] = "public" - self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', + time.gmtime()) self["vlan"] = FLAGS.public_vlan self.save() self.express() @@ -458,7 +467,7 @@ class PublicNetworkController(BaseNetwork): if address: addresses = [self.get_host(address)] for addr in addresses: - if addr.get('private_ip','available') == 'available': + if addr.get('private_ip', 'available') == 'available': continue public_ip = addr['address'] private_ip = addr['private_ip'] @@ -490,8 +499,9 @@ class PublicNetworkController(BaseNetwork): % (private_ip, protocol, port)) -# FIXME(todd): does this present a race condition, or is there some piece of -# architecture that mitigates it (only one queue listener per net)? +# FIXME(todd): does this present a race condition, or is there some +# piece of architecture that mitigates it (only one queue +# listener per net)? def get_vlan_for_project(project_id): """ Allocate vlan IDs to individual users. 
@@ -502,7 +512,7 @@ def get_vlan_for_project(project_id): known_vlans = Vlan.dict_by_vlan() for vnum in range(FLAGS.vlan_start, FLAGS.vlan_end): vstr = str(vnum) - if not known_vlans.has_key(vstr): + if not vstr in known_vlans: return Vlan.create(project_id, vnum) old_project_id = known_vlans[vstr] if not manager.AuthManager().get_project(old_project_id): @@ -526,6 +536,7 @@ def get_vlan_for_project(project_id): return Vlan.create(project_id, vnum) raise exception.AddressNotAllocated("Out of VLANs") + def get_project_network(project_id, security_group='default'): """ get a project's private network, allocating one if needed """ project = manager.AuthManager().get_project(project_id) @@ -556,10 +567,8 @@ def get_network_by_interface(iface, security_group='default'): return get_project_network(project_id, security_group) - def get_public_ip_for_instance(instance_id): # FIXME: this should be a lookup - iteration won't scale for address_record in PublicAddress.all(): if address_record.get('instance_id', 'available') == instance_id: return address_record['address'] - diff --git a/nova/network/service.py b/nova/network/service.py index 1a61f49d45..f133241039 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -38,7 +38,7 @@ flags.DEFINE_string('network_type', flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') flags.DEFINE_list('flat_network_ips', - ['192.168.0.2','192.168.0.3','192.168.0.4'], + ['192.168.0.2', '192.168.0.3', '192.168.0.4'], 'Available ips for simple network') flags.DEFINE_string('flat_network_network', '192.168.0.0', 'Network for simple network') @@ -51,17 +51,21 @@ flags.DEFINE_string('flat_network_broadcast', '192.168.0.255', flags.DEFINE_string('flat_network_dns', '8.8.4.4', 'Dns for simple network') + def type_to_class(network_type): if network_type == 'flat': return FlatNetworkService - elif network_type == 'vlan': + elif network_type == 'vlan': return VlanNetworkService raise 
NotFound("Couldn't find %s network type" % network_type) def setup_compute_network(network_type, user_id, project_id, security_group): srv = type_to_class(network_type) - srv.setup_compute_network(network_type, user_id, project_id, security_group) + srv.setup_compute_network(network_type, + user_id, + project_id, + security_group) def get_host_for_project(project_id): @@ -175,6 +179,7 @@ class FlatNetworkService(BaseNetworkService): """Returns an ip to the pool""" datastore.Redis.instance().sadd('ips', fixed_ip) + class VlanNetworkService(BaseNetworkService): """Vlan network with dhcp""" # NOTE(vish): A lot of the interactions with network/model.py can be @@ -194,7 +199,7 @@ class VlanNetworkService(BaseNetworkService): return {'network_type': FLAGS.network_type, 'bridge_name': net['bridge_name'], 'mac_address': mac, - 'private_dns_name' : fixed_ip} + 'private_dns_name': fixed_ip} def deallocate_fixed_ip(self, fixed_ip, *args, **kwargs): diff --git a/nova/network/vpn.py b/nova/network/vpn.py index 1b6dd7a56c..74eebf9a8b 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -33,6 +33,7 @@ flags.DEFINE_integer('vpn_start_port', 1000, flags.DEFINE_integer('vpn_end_port', 2000, 'End port for the cloudpipe VPN servers') + class NoMorePorts(exception.Error): pass @@ -121,4 +122,3 @@ class NetworkData(datastore.BasicModel): self.unassociate_with('ip', self.ip) datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port) super(NetworkData, self).destroy() - diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 94d10200ea..9aa39e516c 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -31,6 +31,7 @@ from nova.network.exception import NoMoreAddresses FLAGS = flags.FLAGS + class NetworkTestCase(test.TrialTestCase): def setUp(self): super(NetworkTestCase, self).setUp() @@ -66,12 +67,14 @@ class NetworkTestCase(test.TrialTestCase): def test_public_network_allocation(self): pubnet = 
IPy.IP(flags.FLAGS.public_range) - address = self.network.allocate_ip(self.user.id, self.projects[0].id, "public") + address = self.network.allocate_ip(self.user.id, + self.projects[0].id, + "public") self.assertTrue(IPy.IP(address) in pubnet) self.assertTrue(IPy.IP(address) in self.network.network) def test_allocate_deallocate_fixed_ip(self): - result = self.service.allocate_fixed_ip( + result = self.service.allocate_fixed_ip( self.user.id, self.projects[0].id) address = result['private_dns_name'] mac = result['mac_address'] @@ -103,7 +106,8 @@ class NetworkTestCase(test.TrialTestCase): secondnet = model.get_project_network(self.projects[1].id, "default") self.assertEqual(True, is_in_project(address, self.projects[0].id)) - self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id)) + self.assertEqual(True, is_in_project(secondaddress, + self.projects[1].id)) self.assertEqual(False, is_in_project(address, self.projects[1].id)) # Addresses are allocated before they're issued @@ -116,19 +120,21 @@ class NetworkTestCase(test.TrialTestCase): self.assertEqual(False, is_in_project(address, self.projects[0].id)) # First address release shouldn't affect the second - self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id)) + self.assertEqual(True, is_in_project(secondaddress, + self.projects[1].id)) rv = self.service.deallocate_fixed_ip(secondaddress) self.dnsmasq.release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) - self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id)) + self.assertEqual(False, is_in_project(secondaddress, + self.projects[1].id)) def test_subnet_edge(self): result = self.service.allocate_fixed_ip(self.user.id, self.projects[0].id) firstaddress = result['private_dns_name'] hostname = "toomany-hosts" - for i in range(1,5): + for i in range(1, 5): project_id = self.projects[i].id result = self.service.allocate_fixed_ip( self.user, project_id) @@ -142,9 +148,12 @@ class 
NetworkTestCase(test.TrialTestCase): self.user, project_id) mac3 = result['mac_address'] address3 = result['private_dns_name'] - self.assertEqual(False, is_in_project(address, self.projects[0].id)) - self.assertEqual(False, is_in_project(address2, self.projects[0].id)) - self.assertEqual(False, is_in_project(address3, self.projects[0].id)) + self.assertEqual(False, is_in_project(address, + self.projects[0].id)) + self.assertEqual(False, is_in_project(address2, + self.projects[0].id)) + self.assertEqual(False, is_in_project(address3, + self.projects[0].id)) rv = self.service.deallocate_fixed_ip(address) rv = self.service.deallocate_fixed_ip(address2) rv = self.service.deallocate_fixed_ip(address3) @@ -212,9 +221,10 @@ class NetworkTestCase(test.TrialTestCase): """ net = model.get_project_network(self.projects[0].id, "default") num_preallocated_ips = len(net.hosts.keys()) - num_available_ips = flags.FLAGS.network_size - (net.num_bottom_reserved_ips + - num_preallocated_ips + - net.num_top_reserved_ips) + net_size = flags.FLAGS.network_size + num_available_ips = net_size - (net.num_bottom_reserved_ips + + num_preallocated_ips + + net.num_top_reserved_ips) self.assertEqual(num_available_ips, len(list(net.available))) def test_too_many_addresses(self): @@ -249,25 +259,22 @@ class NetworkTestCase(test.TrialTestCase): net.bridge_name) self.assertEqual(len(list(net.available)), num_available_ips) + def is_in_project(address, project_id): return address in model.get_project_network(project_id).list_addresses() -def _get_project_addresses(project_id): - project_addresses = [] - for addr in model.get_project_network(project_id).list_addresses(): - project_addresses.append(addr) - return project_addresses def binpath(script): return os.path.abspath(os.path.join(__file__, "../../../bin", script)) + class FakeDNSMasq(object): def issue_ip(self, mac, ip, hostname, interface): cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'), mac, ip, hostname) env = {'DNSMASQ_INTERFACE': 
interface, - 'TESTING' : '1', - 'FLAGFILE' : FLAGS.dhcpbridge_flagfile} + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) logging.debug("ISSUE_IP: %s, %s " % (out, err)) @@ -275,8 +282,7 @@ class FakeDNSMasq(object): cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'), mac, ip, hostname) env = {'DNSMASQ_INTERFACE': interface, - 'TESTING' : '1', - 'FLAGFILE' : FLAGS.dhcpbridge_flagfile} + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} (out, err) = utils.execute(cmd, addl_env=env) logging.debug("RELEASE_IP: %s, %s " % (out, err)) - From c4f6500a4c33d4ad093d29f971c139b63984a0a5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 12:27:06 -0700 Subject: [PATCH 044/101] pylint cleanup --- bin/nova-dhcpbridge | 5 +-- nova/network/exception.py | 5 +++ nova/network/linux_net.py | 66 +++++++++++++++++++++------------------ 3 files changed, 44 insertions(+), 32 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 7789dac989..6a9115fcbd 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -70,8 +70,9 @@ def init_leases(interface): net = model.get_network_by_interface(interface) res = "" for host_name in net.hosts: - res += "%s\n" % linux_net.hostDHCP(net, host_name, - net.hosts[host_name]) + res += "%s\n" % linux_net.host_dhcp(net, + host_name, + net.hosts[host_name]) return res diff --git a/nova/network/exception.py b/nova/network/exception.py index 884ea54b48..8d7aa14985 100644 --- a/nova/network/exception.py +++ b/nova/network/exception.py @@ -24,20 +24,25 @@ from nova.exception import Error class NoMoreAddresses(Error): + """No More Addresses are available in the network""" pass class AddressNotAllocated(Error): + """The specified address has not been allocated""" pass class AddressAlreadyAssociated(Error): + """The specified address has already been associated""" pass class AddressNotAssociated(Error): + """The specified address is not associated""" pass 
class NotValidNetworkSize(Error): + """The network size is not valid""" pass diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 35bfded490..2f6a9638d6 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -15,11 +15,13 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +""" +Implements vlans, bridges, and iptables rules using linux utilities. +""" import logging import signal import os -import subprocess # todo(ja): does the definition of network_path belong here? @@ -34,53 +36,53 @@ flags.DEFINE_string('dhcpbridge_flagfile', def execute(cmd, addl_env=None): + """Wrapper around utils.execute for fake_network""" if FLAGS.fake_network: - logging.debug("FAKE NET: %s" % cmd) + logging.debug("FAKE NET: %s", cmd) return "fake", 0 else: return utils.execute(cmd, addl_env=addl_env) def runthis(desc, cmd): + """Wrapper around utils.runthis for fake_network""" if FLAGS.fake_network: return execute(cmd) else: return utils.runthis(desc, cmd) -def Popen(cmd): - if FLAGS.fake_network: - execute(' '.join(cmd)) - else: - subprocess.Popen(cmd) - - def device_exists(device): - (out, err) = execute("ifconfig %s" % device) + """Check if ethernet device exists""" + (_out, err) = execute("ifconfig %s" % device) return not err def confirm_rule(cmd): + """Delete and re-add iptables rule""" execute("sudo iptables --delete %s" % (cmd)) execute("sudo iptables -I %s" % (cmd)) def remove_rule(cmd): + """Remove iptables rule""" execute("sudo iptables --delete %s" % (cmd)) -def bind_public_ip(ip, interface): +def bind_public_ip(public_ip, interface): + """Bind ip to an interface""" runthis("Binding IP to interface: %s", - "sudo ip addr add %s dev %s" % (ip, interface)) + "sudo ip addr add %s dev %s" % (public_ip, interface)) -def unbind_public_ip(ip, interface): +def unbind_public_ip(public_ip, interface): + """Unbind a public ip from 
an interface""" runthis("Binding IP to interface: %s", - "sudo ip addr del %s dev %s" % (ip, interface)) + "sudo ip addr del %s dev %s" % (public_ip, interface)) def vlan_create(net): - """ create a vlan on on a bridge device unless vlan already exists """ + """Create a vlan on on a bridge device unless vlan already exists""" if not device_exists("vlan%s" % net['vlan']): logging.debug("Starting VLAN inteface for %s network", (net['vlan'])) execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") @@ -89,7 +91,7 @@ def vlan_create(net): def bridge_create(net): - """ create a bridge on a vlan unless it already exists """ + """Create a bridge on a vlan unless it already exists""" if not device_exists(net['bridge_name']): logging.debug("Starting Bridge inteface for %s network", (net['vlan'])) execute("sudo brctl addbr %s" % (net['bridge_name'])) @@ -107,7 +109,8 @@ def bridge_create(net): execute("sudo ifconfig %s up" % net['bridge_name']) -def dnsmasq_cmd(net): +def _dnsmasq_cmd(net): + """Builds dnsmasq command""" cmd = ['sudo -E dnsmasq', ' --strict-order', ' --bind-interfaces', @@ -122,7 +125,8 @@ def dnsmasq_cmd(net): return ''.join(cmd) -def hostDHCP(network, host, mac): +def host_dhcp(network, host, mac): + """Return a host string for a network, host, and mac""" # Logically, the idx of instances they've launched in this net idx = host.split(".")[-1] return "%s,%s-%s-%s.novalocal,%s" % \ @@ -135,14 +139,14 @@ def hostDHCP(network, host, mac): # so any configuration options (like dchp-range, vlan, ...) 
# aren't reloaded def start_dnsmasq(network): - """ (re)starts a dnsmasq server for a given network + """(Re)starts a dnsmasq server for a given network if a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance """ with open(dhcp_file(network['vlan'], 'conf'), 'w') as f: for host_name in network.hosts: - f.write("%s\n" % hostDHCP(network, + f.write("%s\n" % host_dhcp(network, host_name, network.hosts[host_name])) @@ -154,8 +158,8 @@ def start_dnsmasq(network): # correct dnsmasq process try: os.kill(pid, signal.SIGHUP) - except Exception, e: - logging.debug("Hupping dnsmasq threw %s", e) + except Exception as exc: # pylint: disable-msg=W0703 + logging.debug("Hupping dnsmasq threw %s", exc) # otherwise delete the existing leases file and start dnsmasq lease_file = dhcp_file(network['vlan'], 'leases') @@ -165,35 +169,37 @@ def start_dnsmasq(network): # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, 'DNSMASQ_INTERFACE': network['bridge_name']} - execute(dnsmasq_cmd(network), addl_env=env) + execute(_dnsmasq_cmd(network), addl_env=env) def stop_dnsmasq(network): - """ stops the dnsmasq instance for a given network """ + """Stops the dnsmasq instance for a given network""" pid = dnsmasq_pid_for(network) if pid: try: os.kill(pid, signal.SIGTERM) - except Exception, e: - logging.debug("Killing dnsmasq threw %s", e) + except Exception as exc: # pylint: disable-msg=W0703 + logging.debug("Killing dnsmasq threw %s", exc) def dhcp_file(vlan, kind): - """ return path to a pid, leases or conf file for a vlan """ + """Return path to a pid, leases or conf file for a vlan""" return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind)) def bin_file(script): + """Return the absolute path to scipt in the bin directory""" return os.path.abspath(os.path.join(__file__, "../../../bin", script)) def dnsmasq_pid_for(network): - """ the pid for prior dnsmasq instance for a vlan, - 
returns None if no pid file exists + """Returns he pid for prior dnsmasq instance for a vlan - if machine has rebooted pid might be incorrect (caller should check) + Returns None if no pid file exists + + If machine has rebooted pid might be incorrect (caller should check) """ pid_file = dhcp_file(network['vlan'], 'pid') From 538fe868a8c89f892bffbfc0001b64e3bf1c9cf5 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 10 Aug 2010 15:28:35 -0400 Subject: [PATCH 045/101] Oops, we need eventlet as well. --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b0..e3591e92de 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -4,6 +4,7 @@ amqplib==0.6.1 anyjson==0.2.4 boto==2.0b1 carrot==0.10.5 +eventlet==0.9.10 lockfile==0.8 python-daemon==1.5.5 python-gflags==1.3 From e0983caad1c3ff7ca451094f8778b1a62bf91531 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 12:46:40 -0700 Subject: [PATCH 046/101] Further pylint cleanup --- nova/endpoint/cloud.py | 10 +++++----- nova/network/linux_net.py | 4 ++-- nova/network/service.py | 24 ++++++++++++++++-------- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index ad9188ff3d..02969c8e9e 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -103,7 +103,7 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], + line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) @@ -423,7 +423,7 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), + instance.get('project_id', 
None), instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') @@ -560,15 +560,15 @@ class CloudController(object): # TODO: Get the real security group of launch in here security_group = "default" for num in range(int(kwargs['max_count'])): - vpn = False + is_vpn = False if image_id == FLAGS.vpn_image_id: - vpn = True + is_vpn = True allocate_result = yield rpc.call(network_topic, {"method": "allocate_fixed_ip", "args": {"user_id": context.user.id, "project_id": context.project.id, "security_group": security_group, - "vpn": vpn}}) + "is_vpn": is_vpn}}) allocate_data = allocate_result['result'] inst = self.instdir.new() inst['image_id'] = image_id diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 2f6a9638d6..56b4a9dd2e 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -158,7 +158,7 @@ def start_dnsmasq(network): # correct dnsmasq process try: os.kill(pid, signal.SIGHUP) - except Exception as exc: # pylint: disable-msg=W0703 + except Exception as exc: # pylint: disable=W0703 logging.debug("Hupping dnsmasq threw %s", exc) # otherwise delete the existing leases file and start dnsmasq @@ -179,7 +179,7 @@ def stop_dnsmasq(network): if pid: try: os.kill(pid, signal.SIGTERM) - except Exception as exc: # pylint: disable-msg=W0703 + except Exception as exc: # pylint: disable=W0703 logging.debug("Killing dnsmasq threw %s", exc) diff --git a/nova/network/service.py b/nova/network/service.py index f133241039..fd45496c9f 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -17,7 +17,7 @@ # under the License. 
""" -Network Nodes are responsible for allocating ips and setting up network +Network Hosts are responsible for allocating ips and setting up network """ from nova import datastore @@ -53,6 +53,7 @@ flags.DEFINE_string('flat_network_dns', '8.8.4.4', def type_to_class(network_type): + """Convert a network_type string into an actual Python class""" if network_type == 'flat': return FlatNetworkService elif network_type == 'vlan': @@ -61,6 +62,7 @@ def type_to_class(network_type): def setup_compute_network(network_type, user_id, project_id, security_group): + """Sets up the network on a compute host""" srv = type_to_class(network_type) srv.setup_compute_network(network_type, user_id, @@ -69,12 +71,14 @@ def setup_compute_network(network_type, user_id, project_id, security_group): def get_host_for_project(project_id): + """Get host allocated to project from datastore""" redis = datastore.Redis.instance() return redis.get(_host_key(project_id)) def _host_key(project_id): - return "network_host:%s" % project_id + """Returns redis host key for network""" + return "networkhost:%s" % project_id class BaseNetworkService(service.Service): @@ -84,6 +88,7 @@ class BaseNetworkService(service.Service): """ def __init__(self, *args, **kwargs): self.network = model.PublicNetworkController() + super(BaseNetworkService, self).__init__(*args, **kwargs) def set_network_host(self, user_id, project_id, *args, **kwargs): """Safely sets the host of the projects network""" @@ -113,7 +118,7 @@ class BaseNetworkService(service.Service): pass @classmethod - def setup_compute_network(self, user_id, project_id, security_group, + def setup_compute_network(cls, user_id, project_id, security_group, *args, **kwargs): """Sets up matching network for compute hosts""" raise NotImplementedError() @@ -142,7 +147,7 @@ class FlatNetworkService(BaseNetworkService): """Basic network where no vlans are used""" @classmethod - def setup_compute_network(self, user_id, project_id, security_group, + def 
setup_compute_network(cls, user_id, project_id, security_group, *args, **kwargs): """Network is created manually""" pass @@ -186,13 +191,14 @@ class VlanNetworkService(BaseNetworkService): # simplified and improved. Also there it may be useful # to support vlans separately from dhcp, instead of having # both of them together in this class. + # pylint: disable=W0221 def allocate_fixed_ip(self, user_id, project_id, security_group='default', - vpn=False, *args, **kwargs): - """Gets a fixed ip from the pool """ + is_vpn=False, *args, **kwargs): + """Gets a fixed ip from the pool""" mac = utils.generate_mac() net = model.get_project_network(project_id) - if vpn: + if is_vpn: fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac) else: fixed_ip = net.allocate_ip(user_id, project_id, mac) @@ -207,9 +213,11 @@ class VlanNetworkService(BaseNetworkService): return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip) def lease_ip(self, address): + """Called by bridge when ip is leased""" return model.get_network_by_address(address).lease_ip(address) def release_ip(self, address): + """Called by bridge when ip is released""" return model.get_network_by_address(address).release_ip(address) def restart_nets(self): @@ -223,7 +231,7 @@ class VlanNetworkService(BaseNetworkService): vpn.NetworkData.create(project_id) @classmethod - def setup_compute_network(self, user_id, project_id, security_group, + def setup_compute_network(cls, user_id, project_id, security_group, *args, **kwargs): """Sets up matching network for compute hosts""" # NOTE(vish): Use BridgedNetwork instead of DHCPNetwork because From 712b6e41d40303a7a3e9d0ce21dde628361417ae Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 12:51:42 -0700 Subject: [PATCH 047/101] Pylint clean of vpn.py --- nova/network/vpn.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/nova/network/vpn.py b/nova/network/vpn.py index 74eebf9a8b..a0e2a7fa18 100644 
--- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -35,6 +35,7 @@ flags.DEFINE_integer('vpn_end_port', 2000, class NoMorePorts(exception.Error): + """No ports available to allocate for the given ip""" pass @@ -68,42 +69,44 @@ class NetworkData(datastore.BasicModel): return network_data @classmethod - def find_free_port_for_ip(cls, ip): + def find_free_port_for_ip(cls, vpn_ip): """Finds a free port for a given ip from the redis set""" # TODO(vish): these redis commands should be generalized and # placed into a base class. Conceptually, it is # similar to an association, but we are just # storing a set of values instead of keys that # should be turned into objects. - cls._ensure_set_exists(ip) + cls._ensure_set_exists(vpn_ip) - port = datastore.Redis.instance().spop(cls._redis_ports_key(ip)) + port = datastore.Redis.instance().spop(cls._redis_ports_key(vpn_ip)) if not port: raise NoMorePorts() return port @classmethod - def _redis_ports_key(cls, ip): - return 'ip:%s:ports' % ip + def _redis_ports_key(cls, vpn_ip): + """Key that ports are stored under in redis""" + return 'ip:%s:ports' % vpn_ip @classmethod - def _ensure_set_exists(cls, ip): + def _ensure_set_exists(cls, vpn_ip): + """Creates the set of ports for the ip if it doesn't already exist""" # TODO(vish): these ports should be allocated through an admin # command instead of a flag redis = datastore.Redis.instance() - if (not redis.exists(cls._redis_ports_key(ip)) and - not redis.exists(cls._redis_association_name('ip', ip))): + if (not redis.exists(cls._redis_ports_key(vpn_ip)) and + not redis.exists(cls._redis_association_name('ip', vpn_ip))): for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1): - redis.sadd(cls._redis_ports_key(ip), i) + redis.sadd(cls._redis_ports_key(vpn_ip), i) @classmethod - def num_ports_for_ip(cls, ip): + def num_ports_for_ip(cls, vpn_ip): """Calculates the number of free ports for a given ip""" - cls._ensure_set_exists(ip) - return 
datastore.Redis.instance().scard('ip:%s:ports' % ip) + cls._ensure_set_exists(vpn_ip) + return datastore.Redis.instance().scard('ip:%s:ports' % vpn_ip) @property - def ip(self): + def ip(self): # pylint: disable=C0103 """The ip assigned to the project""" return self['ip'] From 049b89babe10068d3976f3f3a99b7dce120e2962 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 10 Aug 2010 18:17:44 -0400 Subject: [PATCH 048/101] work on a router that works with wsgi and non-wsgi routing --- nova/endpoint/rackspace.py | 27 ++++++++------ nova/wsgi.py | 76 ++++++++++++++++++++++++++++++++++++++ tools/pip-requires | 3 ++ 3 files changed, 94 insertions(+), 12 deletions(-) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index 75b828e91d..b4e6cd823e 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -45,18 +45,20 @@ class API(wsgi.Middleware): def __init__(self): super(API, self).__init__(Router(webob.exc.HTTPNotFound())) - def __call__(self, environ, start_response): + @webob.dec.wsgify + def __call__(self, req): + return self.application context = {} - if "HTTP_X_AUTH_TOKEN" in environ: + if "HTTP_X_AUTH_TOKEN" in req.environ: context['user'] = manager.AuthManager().get_user_from_access_key( - environ['HTTP_X_AUTH_TOKEN']) + req.environ['HTTP_X_AUTH_TOKEN']) if context['user']: context['project'] = manager.AuthManager().get_project( context['user'].name) if "user" not in context: - return webob.exc.HTTPForbidden()(environ, start_response) + return webob.exc.HTTPForbidden() environ['nova.context'] = context - return self.application(environ, start_response) + return self.application class Router(wsgi.Router): @@ -64,13 +66,14 @@ class Router(wsgi.Router): def _build_map(self): """Build routing map for authentication and cloud.""" - self._connect("/v1.0", controller=AuthenticationAPI()) - cloud = CloudServerAPI() - self._connect("/servers", controller=cloud.launch_server, - conditions={"method": ["POST"]}) - 
self._connect("/servers/{server_id}", controller=cloud.delete_server, - conditions={'method': ["DELETE"]}) - self._connect("/servers", controller=cloud) + self.map.resource("server", "servers", controller=CloudServerAPI()) + #self._connect("/v1.0", controller=AuthenticationAPI()) + #cloud = CloudServerAPI() + #self._connect("/servers", controller=cloud.launch_server, + # conditions={"method": ["POST"]}) + #self._connect("/servers/{server_id}", controller=cloud.delete_server, + # conditions={'method': ["DELETE"]}) + #self._connect("/servers", controller=cloud) class AuthenticationAPI(wsgi.Application): diff --git a/nova/wsgi.py b/nova/wsgi.py index 4fd6e59e36..2716481058 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -40,6 +40,7 @@ def run_server(application, port): eventlet.wsgi.server(sock, application) +# TODO(gundlach): I think we should toss this class, now that it has no purpose. class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @@ -140,6 +141,81 @@ class ParsedRoutes(Middleware): app = environ['wsgiorg.routing_args'][1]['controller'] return app(environ, start_response) +class MichaelRouter(object): + """ + My attempt at a routing class. Just override __init__ to call + super, then set up routes in self.map. + """ + + def __init__(self): + self.map = routes.Mapper() + self._router = routes.middleware.RoutesMiddleware(self._proceed, self.map) + + @webob.dec.wsgify + def __call__(self, req): + """ + Route the incoming request to a controller based on self.map. + If no match, return a 404. + """ + return self._router + + @webob.dec.wsgify + def _proceed(self, req): + """ + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. 
+ """ + if req.environ['routes.route'] is None: + return webob.exc.HTTPNotFound() + match = environ['wsgiorg.routing_args'][1] + if match.get('_is_wsgi', False): + wsgiapp = match['controller'] + return req.get_response(wsgiapp) + else: + # TODO(gundlach): doubt this is the right way -- and it really + # feels like this code should exist somewhere already on the + # internet + controller, action = match['controller'], match['action'] + delete match['controller'] + delete match['action'] + return _as_response(getattr(controller, action)(**match)) + + controller = environ['wsgiorg.routing_args'][1]['controller'] + self._dispatch(controller) + + def _as_response(self, result): + """ + When routing to a non-wsgi controller+action, its result will + be passed here before returning up the WSGI chain to be converted + into a webob.Response + + + + + +class ApiVersionRouter(MichaelRouter): + + def __init__(self): + super(ApiVersionRouter, self).__init__(self) + + self.map.connect(None, "/v1.0/{path_info:.*}", controller=RsApiRouter()) + self.map.connect(None, "/ec2/{path_info:.*}", controller=Ec2ApiRouter()) + +class RsApiRouter(MichaelRouter): + def __init__(self): + super(RsApiRouter, self).__init__(self) + + self.map.resource("server", "servers", controller=CloudServersServerApi()) + self.map.resource("image", "images", controller=CloudServersImageApi()) + self.map.resource("flavor", "flavors", controller=CloudServersFlavorApi()) + self.map.resource("sharedipgroup", "sharedipgroups", + controller=CloudServersSharedIpGroupApi()) + +class Ec2ApiRouter(object): + def __getattr__(self, key): + return lambda *x: {'dummy response': 'i am a dummy response'} +CloudServersServerApi = CloudServersImageApi = CloudServersFlavorApi = \ + CloudServersSharedIpGroupApi = Ec2ApiRouter class Router(Middleware): # pylint: disable-msg=R0921 """Wrapper to help setup routes.middleware.RoutesMiddleware.""" diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b0..2317907d17 
100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -4,11 +4,14 @@ amqplib==0.6.1 anyjson==0.2.4 boto==2.0b1 carrot==0.10.5 +eventlet==0.9.10 lockfile==0.8 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 +routes==1.12.3 tornado==1.0 +webob==0.9.8 wsgiref==0.1.2 zope.interface==3.6.1 mox==0.5.0 From 47bf3ed11f2f372a07ea3b1b8deb9f7684cc2e5d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 15:45:24 -0700 Subject: [PATCH 049/101] lots more pylint fixes --- nova/network/linux_net.py | 2 +- nova/network/model.py | 131 +++++++++++++++++++---------- nova/tests/network_unittest.py | 145 ++++++++++++++++----------------- 3 files changed, 159 insertions(+), 119 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 56b4a9dd2e..0e8ddcc6a2 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -118,7 +118,7 @@ def _dnsmasq_cmd(net): ' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'), ' --listen-address=%s' % net.dhcp_listen_address, ' --except-interface=lo', - ' --dhcp-range=%s,static,600s' % (net.dhcp_range_start), + ' --dhcp-range=%s,static,600s' % net.dhcp_range_start, ' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'), ' --dhcp-script=%s' % bin_file('nova-dhcpbridge'), ' --leasefile-ro'] diff --git a/nova/network/model.py b/nova/network/model.py index 734a3f7a92..7b1e16f261 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -57,7 +57,8 @@ logging.getLogger().setLevel(logging.DEBUG) class Vlan(datastore.BasicModel): - def __init__(self, project, vlan): + """Tracks vlans assigned to project it the datastore""" + def __init__(self, project, vlan): # pylint: disable=W0231 """ Since we don't want to try and find a vlan by its identifier, but by a project id, we don't call super-init. 
@@ -67,10 +68,12 @@ class Vlan(datastore.BasicModel): @property def identifier(self): + """Datastore identifier""" return "%s:%s" % (self.project_id, self.vlan_id) @classmethod def create(cls, project, vlan): + """Create a Vlan object""" instance = cls(project, vlan) instance.save() return instance @@ -78,6 +81,7 @@ class Vlan(datastore.BasicModel): @classmethod @datastore.absorb_connection_error def lookup(cls, project): + """Returns object by project if it exists in datastore or None""" set_name = cls._redis_set_name(cls.__name__) vlan = datastore.Redis.instance().hget(set_name, project) if vlan: @@ -88,19 +92,19 @@ class Vlan(datastore.BasicModel): @classmethod @datastore.absorb_connection_error def dict_by_project(cls): - """a hash of project:vlan""" + """A hash of project:vlan""" set_name = cls._redis_set_name(cls.__name__) - return datastore.Redis.instance().hgetall(set_name) + return datastore.Redis.instance().hgetall(set_name) or {} @classmethod @datastore.absorb_connection_error def dict_by_vlan(cls): - """a hash of vlan:project""" + """A hash of vlan:project""" set_name = cls._redis_set_name(cls.__name__) retvals = {} - hashset = datastore.Redis.instance().hgetall(set_name) - for val in hashset.keys(): - retvals[hashset[val]] = val + hashset = datastore.Redis.instance().hgetall(set_name) or {} + for (key, val) in hashset.iteritems(): + retvals[val] = key return retvals @classmethod @@ -125,10 +129,12 @@ class Vlan(datastore.BasicModel): @datastore.absorb_connection_error def destroy(self): + """Removes the object from the datastore""" set_name = self._redis_set_name(self.__class__.__name__) datastore.Redis.instance().hdel(set_name, self.project_id) def subnet(self): + """Returns a string containing the subnet""" vlan = int(self.vlan_id) network = IPy.IP(FLAGS.private_range) start = (vlan - FLAGS.vlan_start) * FLAGS.network_size @@ -142,17 +148,22 @@ class Vlan(datastore.BasicModel): # TODO(ja): does vlanpool "keeper" need to know the min/max - # shouldn't 
FLAGS always win? class BaseNetwork(datastore.BasicModel): + """Implements basic logic for allocating ips in a network""" override_type = 'network' @property def identifier(self): + """Datastore identifier""" return self.network_id def default_state(self): + """Default values for new objects""" return {'network_id': self.network_id, 'network_str': self.network_str} @classmethod + # pylint: disable=R0913 def create(cls, user_id, project_id, security_group, vlan, network_str): + """Create a BaseNetwork object""" network_id = "%s:%s" % (project_id, security_group) net = cls(network_id, network_str) net['user_id'] = user_id @@ -170,52 +181,65 @@ class BaseNetwork(datastore.BasicModel): @property def network(self): + """Returns a string representing the network""" return IPy.IP(self['network_str']) @property def netmask(self): + """Returns the netmask of this network""" return self.network.netmask() @property def gateway(self): + """Returns the network gateway address""" return self.network[1] @property def broadcast(self): + """Returns the network broadcast address""" return self.network.broadcast() @property def bridge_name(self): + """Returns the bridge associated with this network""" return "br%s" % (self["vlan"]) @property def user(self): + """Returns the user associated with this network""" return manager.AuthManager().get_user(self['user_id']) @property def project(self): + """Returns the project associated with this network""" return manager.AuthManager().get_project(self['project_id']) @property def _hosts_key(self): + """Datastore key where hosts are stored""" return "network:%s:hosts" % (self['network_str']) @property def hosts(self): + """Returns a hash of all hosts allocated in this network""" return datastore.Redis.instance().hgetall(self._hosts_key) or {} def _add_host(self, _user_id, _project_id, host, target): + """Add a host to the datastore""" datastore.Redis.instance().hset(self._hosts_key, host, target) def _rem_host(self, host): + """Remove a host 
from the datastore""" datastore.Redis.instance().hdel(self._hosts_key, host) @property def assigned(self): + """Returns a list of all assigned keys""" return datastore.Redis.instance().hkeys(self._hosts_key) @property def available(self): + """Returns a list of all available addresses in the network""" for idx in range(self.num_bottom_reserved_ips, len(self.network) - self.num_top_reserved_ips): address = str(self.network[idx]) @@ -224,15 +248,18 @@ class BaseNetwork(datastore.BasicModel): @property def num_bottom_reserved_ips(self): + """Returns number of ips reserved at the bottom of the range""" return 2 # Network, Gateway @property def num_top_reserved_ips(self): + """Returns number of ips reserved at the top of the range""" return 1 # Broadcast def allocate_ip(self, user_id, project_id, mac): + """Allocates an ip to a mac address""" for address in self.available: - logging.debug("Allocating IP %s to %s" % (address, project_id)) + logging.debug("Allocating IP %s to %s", address, project_id) self._add_host(user_id, project_id, address, mac) self.express(address=address) return address @@ -240,28 +267,37 @@ class BaseNetwork(datastore.BasicModel): (project_id, str(self.network))) def lease_ip(self, ip_str): - logging.debug("Leasing allocated IP %s" % (ip_str)) + """Called when DHCP lease is activated""" + logging.debug("Leasing allocated IP %s", ip_str) def release_ip(self, ip_str): + """Called when DHCP lease expires + + Removes the ip from the assigned list""" if not ip_str in self.assigned: raise exception.AddressNotAllocated() self._rem_host(ip_str) self.deexpress(address=ip_str) + logging.debug("Releasing IP %s", ip_str) def deallocate_ip(self, ip_str): + """Deallocates an allocated ip""" # NOTE(vish): Perhaps we should put the ip into an intermediate # state, so we know that we are pending waiting for # dnsmasq to confirm that it has been released. 
- pass + logging.debug("Deallocating allocated IP %s", ip_str) def list_addresses(self): + """List all allocated addresses""" for address in self.hosts: yield address def express(self, address=None): + """Set up network. Implemented in subclasses""" pass def deexpress(self, address=None): + """Tear down network. Implemented in subclasses""" pass @@ -286,7 +322,11 @@ class BridgedNetwork(BaseNetwork): override_type = 'network' @classmethod - def get_network_for_project(cls, user_id, project_id, security_group): + def get_network_for_project(cls, + user_id, + project_id, + security_group='default'): + """Returns network for a given project""" vlan = get_vlan_for_project(project_id) network_str = vlan.subnet() return cls.create(user_id, project_id, security_group, vlan.vlan_id, @@ -304,29 +344,14 @@ class BridgedNetwork(BaseNetwork): class DHCPNetwork(BridgedNetwork): - """ - properties: - dhcp_listen_address: the ip of the gateway / dhcp host - dhcp_range_start: the first ip to give out - dhcp_range_end: the last ip to give out - """ + """Network supporting DHCP""" bridge_gets_ip = True override_type = 'network' def __init__(self, *args, **kwargs): super(DHCPNetwork, self).__init__(*args, **kwargs) - # logging.debug("Initing DHCPNetwork object...") - self.dhcp_listen_address = self.gateway - self.dhcp_range_start = self.network[self.num_bottom_reserved_ips] - self.dhcp_range_end = self.network[-self.num_top_reserved_ips] - try: + if not(os.path.exists(FLAGS.networks_path)): os.makedirs(FLAGS.networks_path) - # NOTE(todd): I guess this is a lazy way to not have to check if the - # directory exists, but shouldn't we be smarter about - # telling the difference between existing directory and - # permission denied? 
(Errno 17 vs 13, OSError) - except Exception, err: - pass @property def num_bottom_reserved_ips(self): @@ -338,6 +363,16 @@ class DHCPNetwork(BridgedNetwork): return super(DHCPNetwork, self).num_top_reserved_ips + \ FLAGS.cnt_vpn_clients + @property + def dhcp_listen_address(self): + """Address where dhcp server should listen""" + return self.gateway + + @property + def dhcp_range_start(self): + """Starting address dhcp server should use""" + return self.network[self.num_bottom_reserved_ips] + def express(self, address=None): super(DHCPNetwork, self).express(address=address) if len(self.assigned) > 0: @@ -346,15 +381,17 @@ class DHCPNetwork(BridgedNetwork): linux_net.start_dnsmasq(self) else: logging.debug("Not launching dnsmasq: no hosts.") - self.express_cloudpipe() + self.express_vpn() def allocate_vpn_ip(self, user_id, project_id, mac): + """Allocates the reserved ip to a vpn instance""" address = str(self.network[2]) self._add_host(user_id, project_id, address, mac) self.express(address=address) return address - def express_cloudpipe(self): + def express_vpn(self): + """Sets up routing rules for vpn""" private_ip = str(self.network[2]) linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT" % (private_ip, )) @@ -372,6 +409,7 @@ class DHCPNetwork(BridgedNetwork): class PublicAddress(datastore.BasicModel): + """Represents an elastic ip in the datastore""" override_type = "address" def __init__(self, address): @@ -387,6 +425,7 @@ class PublicAddress(datastore.BasicModel): @classmethod def create(cls, user_id, project_id, address): + """Creates a PublicAddress object""" addr = cls(address) addr['user_id'] = user_id addr['project_id'] = project_id @@ -400,12 +439,13 @@ DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] class PublicNetworkController(BaseNetwork): + """Handles elastic ips""" override_type = 'network' def __init__(self, *args, **kwargs): network_id = "public:default" super(PublicNetworkController, 
self).__init__(network_id, - FLAGS.public_range) + FLAGS.public_range, *args, **kwargs) self['user_id'] = "public" self['project_id'] = "public" self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', @@ -416,12 +456,14 @@ class PublicNetworkController(BaseNetwork): @property def host_objs(self): + """Returns assigned addresses as PublicAddress objects""" for address in self.assigned: yield PublicAddress(address) - def get_host(self, host): - if host in self.assigned: - return PublicAddress(host) + def get_host(self, public_ip): + """Returns a specific public ip as PublicAddress object""" + if public_ip in self.assigned: + return PublicAddress(public_ip) return None def _add_host(self, user_id, project_id, host, _target): @@ -437,9 +479,10 @@ class PublicNetworkController(BaseNetwork): self.release_ip(ip_str) def associate_address(self, public_ip, private_ip, instance_id): + """Associates a public ip to a private ip and instance id""" if not public_ip in self.assigned: raise exception.AddressNotAllocated() - # TODO(joshua): Keep an index going both ways + # TODO(josh): Keep an index going both ways for addr in self.host_objs: if addr.get('private_ip', None) == private_ip: raise exception.AddressAlreadyAssociated() @@ -452,6 +495,7 @@ class PublicNetworkController(BaseNetwork): self.express(address=public_ip) def disassociate_address(self, public_ip): + """Disassociates a public ip with its private ip""" if not public_ip in self.assigned: raise exception.AddressNotAllocated() addr = self.get_host(public_ip) @@ -476,7 +520,7 @@ class PublicNetworkController(BaseNetwork): % (public_ip, private_ip)) linux_net.confirm_rule("POSTROUTING -t nat -s %s -j SNAT --to %s" % (private_ip, public_ip)) - # TODO: Get these from the secgroup datastore entries + # TODO(joshua): Get these from the secgroup datastore entries linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT" % (private_ip)) for (protocol, port) in DEFAULT_PORTS: @@ -503,9 +547,7 @@ class 
PublicNetworkController(BaseNetwork): # piece of architecture that mitigates it (only one queue # listener per net)? def get_vlan_for_project(project_id): - """ - Allocate vlan IDs to individual users. - """ + """Allocate vlan IDs to individual users""" vlan = Vlan.lookup(project_id) if vlan: return vlan @@ -538,7 +580,7 @@ def get_vlan_for_project(project_id): def get_project_network(project_id, security_group='default'): - """ get a project's private network, allocating one if needed """ + """Gets a project's private network, allocating one if needed""" project = manager.AuthManager().get_project(project_id) if not project: raise nova_exception.NotFound("Project %s doesn't exist." % project_id) @@ -549,26 +591,29 @@ def get_project_network(project_id, security_group='default'): def get_network_by_address(address): + """Gets the network for a given private ip""" # TODO(vish): This is completely the wrong way to do this, but # I'm getting the network binary working before I # tackle doing this the right way. 
- logging.debug("Get Network By Address: %s" % address) + logging.debug("Get Network By Address: %s", address) for project in manager.AuthManager().get_projects(): net = get_project_network(project.id) if address in net.assigned: - logging.debug("Found %s in %s" % (address, project.id)) + logging.debug("Found %s in %s", address, project.id) return net raise exception.AddressNotAllocated() def get_network_by_interface(iface, security_group='default'): + """Gets the network for a given interface""" vlan = iface.rpartition("br")[2] project_id = Vlan.dict_by_vlan().get(vlan) return get_project_network(project_id, security_group) def get_public_ip_for_instance(instance_id): - # FIXME: this should be a lookup - iteration won't scale + """Gets the public ip for a given instance""" + # FIXME(josh): this should be a lookup - iteration won't scale for address_record in PublicAddress.all(): if address_record.get('instance_id', 'available') == instance_id: return address_record['address'] diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 9aa39e516c..5671a88868 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -15,7 +15,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
- +""" +Unit Tests for network code +""" import IPy import os import logging @@ -33,7 +35,8 @@ FLAGS = flags.FLAGS class NetworkTestCase(test.TrialTestCase): - def setUp(self): + """Test cases for network code""" + def setUp(self): # pylint: disable=C0103 super(NetworkTestCase, self).setUp() # NOTE(vish): if you change these flags, make sure to change the # flags in the corresponding section in nova-dhcpbridge @@ -44,7 +47,6 @@ class NetworkTestCase(test.TrialTestCase): network_size=32) logging.getLogger().setLevel(logging.DEBUG) self.manager = manager.AuthManager() - self.dnsmasq = FakeDNSMasq() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] self.projects.append(self.manager.create_project('netuser', @@ -56,49 +58,48 @@ class NetworkTestCase(test.TrialTestCase): 'netuser', name)) vpn.NetworkData.create(self.projects[i].id) - self.network = model.PublicNetworkController() self.service = service.VlanNetworkService() - def tearDown(self): + def tearDown(self): # pylint: disable=C0103 super(NetworkTestCase, self).tearDown() for project in self.projects: self.manager.delete_project(project) self.manager.delete_user(self.user) def test_public_network_allocation(self): + """Makes sure that we can allocaate a public ip""" pubnet = IPy.IP(flags.FLAGS.public_range) - address = self.network.allocate_ip(self.user.id, - self.projects[0].id, - "public") + address = self.service.allocate_elastic_ip(self.user.id, + self.projects[0].id) self.assertTrue(IPy.IP(address) in pubnet) - self.assertTrue(IPy.IP(address) in self.network.network) def test_allocate_deallocate_fixed_ip(self): + """Makes sure that we can allocate and deallocate a fixed ip""" result = self.service.allocate_fixed_ip( self.user.id, self.projects[0].id) address = result['private_dns_name'] mac = result['mac_address'] - logging.debug("Was allocated %s" % (address)) net = model.get_project_network(self.projects[0].id, "default") self.assertEqual(True, is_in_project(address, 
self.projects[0].id)) hostname = "test-host" - self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) - rv = self.service.deallocate_fixed_ip(address) + issue_ip(mac, address, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(address) # Doesn't go away until it's dhcp released self.assertEqual(True, is_in_project(address, self.projects[0].id)) - self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) + release_ip(mac, address, hostname, net.bridge_name) self.assertEqual(False, is_in_project(address, self.projects[0].id)) - def test_range_allocation(self): - hostname = "test-host" - result = self.service.allocate_fixed_ip( - self.user.id, self.projects[0].id) + def test_side_effects(self): + """Ensures allocating and releasing has no side effects""" + hostname = "side-effect-host" + result = self.service.allocate_fixed_ip(self.user.id, + self.projects[0].id) mac = result['mac_address'] address = result['private_dns_name'] - result = self.service.allocate_fixed_ip( - self.user, self.projects[1].id) + result = self.service.allocate_fixed_ip(self.user, + self.projects[1].id) secondmac = result['mac_address'] secondaddress = result['private_dns_name'] @@ -111,25 +112,24 @@ class NetworkTestCase(test.TrialTestCase): self.assertEqual(False, is_in_project(address, self.projects[1].id)) # Addresses are allocated before they're issued - self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) - self.dnsmasq.issue_ip(secondmac, secondaddress, - hostname, secondnet.bridge_name) + issue_ip(mac, address, hostname, net.bridge_name) + issue_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) - rv = self.service.deallocate_fixed_ip(address) - self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(address) + release_ip(mac, address, hostname, net.bridge_name) self.assertEqual(False, is_in_project(address, self.projects[0].id)) # First address release shouldn't affect the second 
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id)) - rv = self.service.deallocate_fixed_ip(secondaddress) - self.dnsmasq.release_ip(secondmac, secondaddress, - hostname, secondnet.bridge_name) + self.service.deallocate_fixed_ip(secondaddress) + release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name) self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id)) def test_subnet_edge(self): + """Makes sure that private ips don't overlap""" result = self.service.allocate_fixed_ip(self.user.id, self.projects[0].id) firstaddress = result['private_dns_name'] @@ -148,29 +148,34 @@ class NetworkTestCase(test.TrialTestCase): self.user, project_id) mac3 = result['mac_address'] address3 = result['private_dns_name'] + net = model.get_project_network(project_id, "default") + issue_ip(mac, address, hostname, net.bridge_name) + issue_ip(mac2, address2, hostname, net.bridge_name) + issue_ip(mac3, address3, hostname, net.bridge_name) self.assertEqual(False, is_in_project(address, self.projects[0].id)) self.assertEqual(False, is_in_project(address2, self.projects[0].id)) self.assertEqual(False, is_in_project(address3, self.projects[0].id)) - rv = self.service.deallocate_fixed_ip(address) - rv = self.service.deallocate_fixed_ip(address2) - rv = self.service.deallocate_fixed_ip(address3) - net = model.get_project_network(project_id, "default") - self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) - self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name) - self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(address) + self.service.deallocate_fixed_ip(address2) + self.service.deallocate_fixed_ip(address3) + release_ip(mac, address, hostname, net.bridge_name) + release_ip(mac2, address2, hostname, net.bridge_name) + release_ip(mac3, address3, hostname, net.bridge_name) net = model.get_project_network(self.projects[0].id, "default") - rv = 
self.service.deallocate_fixed_ip(firstaddress) - self.dnsmasq.release_ip(mac, firstaddress, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(firstaddress) + release_ip(mac, firstaddress, hostname, net.bridge_name) def test_vpn_ip_and_port_looks_valid(self): + """Ensure the vpn ip and port are reasonable""" self.assert_(self.projects[0].vpn_ip) self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port) self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port) def test_too_many_vpns(self): + """Ensure error is raised if we run out of vpn ports""" vpns = [] for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)): vpns.append(vpn.NetworkData.create("vpnuser%s" % i)) @@ -180,7 +185,6 @@ class NetworkTestCase(test.TrialTestCase): def test_ips_are_reused(self): """Makes sure that ip addresses that are deallocated get reused""" - result = self.service.allocate_fixed_ip( self.user.id, self.projects[0].id) mac = result['mac_address'] @@ -189,24 +193,18 @@ class NetworkTestCase(test.TrialTestCase): hostname = "reuse-host" net = model.get_project_network(self.projects[0].id, "default") - self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name) - rv = self.service.deallocate_fixed_ip(address) - self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name) + issue_ip(mac, address, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(address) + release_ip(mac, address, hostname, net.bridge_name) result = self.service.allocate_fixed_ip( self.user, self.projects[0].id) secondmac = result['mac_address'] secondaddress = result['private_dns_name'] self.assertEqual(address, secondaddress) - rv = self.service.deallocate_fixed_ip(secondaddress) - self.dnsmasq.issue_ip(secondmac, - secondaddress, - hostname, - net.bridge_name) - self.dnsmasq.release_ip(secondmac, - secondaddress, - hostname, - net.bridge_name) + self.service.deallocate_fixed_ip(secondaddress) + issue_ip(secondmac, secondaddress, hostname, net.bridge_name) + 
release_ip(secondmac, secondaddress, hostname, net.bridge_name) def test_available_ips(self): """Make sure the number of available ips for the network is correct @@ -242,47 +240,44 @@ class NetworkTestCase(test.TrialTestCase): self.projects[0].id) macs[i] = result['mac_address'] addresses[i] = result['private_dns_name'] - self.dnsmasq.issue_ip(macs[i], - addresses[i], - hostname, - net.bridge_name) + issue_ip(macs[i], addresses[i], hostname, net.bridge_name) self.assertEqual(len(list(net.available)), 0) self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip, self.user.id, self.projects[0].id) for i in range(len(addresses)): - rv = self.service.deallocate_fixed_ip(addresses[i]) - self.dnsmasq.release_ip(macs[i], - addresses[i], - hostname, - net.bridge_name) + self.service.deallocate_fixed_ip(addresses[i]) + release_ip(macs[i], addresses[i], hostname, net.bridge_name) self.assertEqual(len(list(net.available)), num_available_ips) def is_in_project(address, project_id): + """Returns true if address is in specified project""" return address in model.get_project_network(project_id).list_addresses() def binpath(script): + """Returns the absolute path to a script in bin""" return os.path.abspath(os.path.join(__file__, "../../../bin", script)) -class FakeDNSMasq(object): - def issue_ip(self, mac, ip, hostname, interface): - cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'), - mac, ip, hostname) - env = {'DNSMASQ_INTERFACE': interface, - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("ISSUE_IP: %s, %s " % (out, err)) +def issue_ip(mac, private_ip, hostname, interface): + """Run add command on dhcpbridge""" + cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'), + mac, private_ip, hostname) + env = {'DNSMASQ_INTERFACE': interface, + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} + (out, err) = utils.execute(cmd, addl_env=env) + logging.debug("ISSUE_IP: %s, %s ", out, err) - 
def release_ip(self, mac, ip, hostname, interface): - cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'), - mac, ip, hostname) - env = {'DNSMASQ_INTERFACE': interface, - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("RELEASE_IP: %s, %s " % (out, err)) +def release_ip(mac, private_ip, hostname, interface): + """Run del command on dhcpbridge""" + cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'), + mac, private_ip, hostname) + env = {'DNSMASQ_INTERFACE': interface, + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} + (out, err) = utils.execute(cmd, addl_env=env) + logging.debug("RELEASE_IP: %s, %s ", out, err) From 14c7bca9cb8451e2ec8224fb5699c6f2ad480dac Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 17:34:20 -0700 Subject: [PATCH 050/101] Adds get_roles commands to manager and driver classes --- nova/auth/ldapdriver.py | 34 +++++++++++++++++++++++++++------- nova/auth/manager.py | 18 ++++++++++++++++++ nova/tests/auth_unittest.py | 18 +++++++++++++++++- 3 files changed, 62 insertions(+), 8 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index ec739e1341..aaaf8553c1 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -181,7 +181,7 @@ class LdapDriver(object): if member_uids != None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Project can't be created " + raise exception.NotFound("Project can't be created " "because user %s doesn't exist" % member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as a member because members is required @@ -236,6 +236,26 @@ class LdapDriver(object): role_dn = self.__role_to_dn(role, project_id) return self.__remove_from_group(uid, role_dn) + def get_user_roles(self, uid, project_id=None): + """Retrieve list of roles for user (or user and project)""" + if project_id is None: + # NOTE(vish): This is 
unneccesarily slow, but since we can't + # guarantee that the global roles are located + # together in the ldap tree, we're doing this version. + roles = [] + for role in FLAGS.allowed_roles: + role_dn = self.__role_to_dn(role) + if self.__is_in_group(uid, role_dn): + roles.append(role) + return roles + else: + project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + roles = self.__find_objects(project_dn, + '(&(&(objectclass=groupOfNames)' + '(!(objectclass=novaProject)))' + '(member=%s))' % self.__uid_to_dn(uid)) + return [role['cn'][0] for role in roles] + def delete_user(self, uid): """Delete a user""" if not self.__user_exists(uid): @@ -253,24 +273,24 @@ class LdapDriver(object): self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid, FLAGS.ldap_user_subtree)) - def delete_project(self, name): + def delete_project(self, project_id): """Delete a project""" - project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree) + project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) self.__delete_roles(project_dn) self.__delete_group(project_dn) - def __user_exists(self, name): + def __user_exists(self, uid): """Check if user exists""" - return self.get_user(name) != None + return self.get_user(uid) != None def __key_pair_exists(self, uid, key_name): """Check if key pair exists""" return self.get_user(uid) != None return self.get_key_pair(uid, key_name) != None - def __project_exists(self, name): + def __project_exists(self, project_id): """Check if project exists""" - return self.get_project(name) != None + return self.get_project(project_id) != None def __find_object(self, dn, query=None, scope=None): """Find an object by dn and query""" diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 6d71a7ad69..8195182fca 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -38,6 +38,10 @@ from nova.network import vpn FLAGS = flags.FLAGS +flags.DEFINE_list('allowed_roles', + ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 
'developer'], + 'Allowed roles for project') + # NOTE(vish): a user with one of these roles will be a superuser and # have access to all api commands flags.DEFINE_list('superuser_roles', ['cloudadmin'], @@ -455,6 +459,20 @@ class AuthManager(object): with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) + def get_roles(self): + """Get list of allowed roles""" + return FLAGS.allowed_roles + + def get_user_roles(self, user, project=None): + """Get user global or per-project roles""" + roles = [] + with self.driver() as drv: + roles = drv.get_user_roles(User.safe_id(user), + Project.safe_id(project)) + if project is not None and self.is_project_manager(user, project): + roles.append('projectmanager') + return roles + def get_project(self, pid): """Get project object by id""" with self.driver() as drv: diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index f7e0625a33..2d99c8e36f 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -179,7 +179,23 @@ class AuthTestCase(test.BaseTestCase): project.add_role('test1', 'sysadmin') self.assertTrue(project.has_role('test1', 'sysadmin')) - def test_211_can_remove_project_role(self): + def test_211_can_list_project_roles(self): + project = self.manager.get_project('testproj') + user = self.manager.get_user('test1') + self.manager.add_role(user, 'netadmin', project) + roles = self.manager.get_user_roles(user) + self.assertTrue('sysadmin' in roles) + self.assertFalse('netadmin' in roles) + self.assertFalse('projectmanager' in roles) + project_roles = self.manager.get_user_roles(user, project) + self.assertTrue('sysadmin' in project_roles) + self.assertTrue('netadmin' in project_roles) + self.assertTrue('projectmanager' in project_roles) + # has role should be false because global role is missing + self.assertFalse(self.manager.has_role(user, 'netadmin', project)) + + + def test_212_can_remove_project_role(self): project = 
self.manager.get_project('testproj') self.assertTrue(project.has_role('test1', 'sysadmin')) project.remove_role('test1', 'sysadmin') From 19b9164c4eaae0c2c9144f9e839fbafcac7c3ed3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 17:42:58 -0700 Subject: [PATCH 051/101] Throw exceptions for illegal roles on role add --- nova/auth/manager.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 8195182fca..e338dfc839 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -436,6 +436,10 @@ class AuthManager(object): @type project: Project or project_id @param project: Project in which to add local role. """ + if role not in FLAGS.allowed_roles: + raise exception.NotFound("The %s role can not be found" % role) + if project is not None and role in FLAGS.global_roles: + raise exception.NotFound("The %s role is global only" % role) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) From cff3cccc342c7d09cd2ec6c95431e1b373eba620 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 18:04:23 -0700 Subject: [PATCH 052/101] change get_roles to have a flag for project_roles or not. 
Don't show 'projectmanager' in list of roles --- nova/auth/manager.py | 15 +++++++-------- nova/tests/auth_unittest.py | 2 -- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index e338dfc839..064fd78bca 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -463,19 +463,18 @@ class AuthManager(object): with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) - def get_roles(self): + def get_roles(self, project_roles=True): """Get list of allowed roles""" - return FLAGS.allowed_roles + if project_roles: + return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles)) + else: + return FLAGS.allowed_roles def get_user_roles(self, user, project=None): """Get user global or per-project roles""" - roles = [] with self.driver() as drv: - roles = drv.get_user_roles(User.safe_id(user), - Project.safe_id(project)) - if project is not None and self.is_project_manager(user, project): - roles.append('projectmanager') - return roles + return drv.get_user_roles(User.safe_id(user), + Project.safe_id(project)) def get_project(self, pid): """Get project object by id""" diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 2d99c8e36f..0b404bfdcf 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -186,11 +186,9 @@ class AuthTestCase(test.BaseTestCase): roles = self.manager.get_user_roles(user) self.assertTrue('sysadmin' in roles) self.assertFalse('netadmin' in roles) - self.assertFalse('projectmanager' in roles) project_roles = self.manager.get_user_roles(user, project) self.assertTrue('sysadmin' in project_roles) self.assertTrue('netadmin' in project_roles) - self.assertTrue('projectmanager' in project_roles) # has role should be false because global role is missing self.assertFalse(self.manager.has_role(user, 'netadmin', project)) From 253cc1f683dfcfe75b1a5c1eb3a93f07e85bb041 Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: 
Tue, 10 Aug 2010 18:46:49 -0700 Subject: [PATCH 053/101] Wired up admin api for user roles --- nova/adminclient.py | 36 +++++++++++++++++++++++++++++++++++- nova/endpoint/admin.py | 13 +++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 25d5e71cbb..5aa8ff9c21 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -57,6 +57,28 @@ class UserInfo(object): elif name == 'secretkey': self.secretkey = str(value) +class UserRole(object): + """ + Information about a Nova user's role, as parsed through SAX. + Fields include: + role + """ + def __init__(self, connection=None): + self.connection = connection + self.role = None + + def __repr__(self): + return 'UserRole:%s' % self.role + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'role': + self.role = value + else: + setattr(self, name, str(value)) + class ProjectInfo(object): """ Information about a Nova project, as parsed through SAX @@ -114,7 +136,6 @@ class ProjectMember(object): else: setattr(self, name, str(value)) - class HostInfo(object): """ Information about a Nova Host, as parsed through SAX: @@ -196,6 +217,19 @@ class NovaAdminClient(object): """ deletes a user """ return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo) + def get_user_roles(self, user, project=None): + """ + Returns a list of roles for the given user. + Omitting project will return any global roles that the user has. + Specifying project will return only project specific roles. + """ + params = {'User':user} + if project: + params['Project'] = project + return self.apiconn.get_list('DescribeUserRoles', + params, + [('item', UserRole)]) + def add_user_role(self, user, role, project=None): """ Add a role to a user either globally or for a specific project. 
diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index c4b8c05ca4..a3114c0a39 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -102,6 +102,19 @@ class AdminController(object): return True + @admin_only + def describe_roles(self, context, project_roles=True, **kwargs): + """Returns a list of allowed roles.""" + return manager.AuthManager().get_roles(project_roles) + + @admin_only + def describe_user_roles(self, context, user, project=None, **kwargs): + """Returns a list of roles for the given user. + Omitting project will return any global roles that the user has. + Specifying project will return only project specific roles. + """ + return manager.AuthManager().get_user_roles(user, project=project) + @admin_only def modify_user_role(self, context, user, role, project=None, operation='add', **kwargs): From 2955018b58a731f48dcdee64d889b4be104250f1 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 19:00:35 -0700 Subject: [PATCH 054/101] fix spacing issue in ldapdriver --- nova/auth/ldapdriver.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index aaaf8553c1..453fa196ca 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -181,8 +181,9 @@ class LdapDriver(object): if member_uids != None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound("Project can't be created " - "because user %s doesn't exist" % member_uid) + raise exception.NotFound("Project can't be created " + "because user %s doesn't exist" + % member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as a member because members is required if not manager_dn in members: From cf2002486d651576a28a4c53c6b49bb30c047108 Mon Sep 17 00:00:00 2001 From: Devin Carlen Date: Tue, 10 Aug 2010 19:01:40 -0700 Subject: [PATCH 055/101] Fixed admin api for user roles --- nova/adminclient.py | 13 +++++++++---- 
nova/endpoint/admin.py | 6 ++++-- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 5aa8ff9c21..242298a759 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -217,11 +217,16 @@ class NovaAdminClient(object): """ deletes a user """ return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo) + def get_roles(self, project_roles=True): + """Returns a list of available roles.""" + return self.apiconn.get_list('DescribeRoles', + {'ProjectRoles': project_roles}, + [('item', UserRole)]) + def get_user_roles(self, user, project=None): - """ - Returns a list of roles for the given user. - Omitting project will return any global roles that the user has. - Specifying project will return only project specific roles. + """Returns a list of roles for the given user. + Omitting project will return any global roles that the user has. + Specifying project will return only project specific roles. """ params = {'User':user} if project: diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index a3114c0a39..4f4824fcaf 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -105,7 +105,8 @@ class AdminController(object): @admin_only def describe_roles(self, context, project_roles=True, **kwargs): """Returns a list of allowed roles.""" - return manager.AuthManager().get_roles(project_roles) + roles = manager.AuthManager().get_roles(project_roles) + return { 'roles': [{'role': r} for r in roles]} @admin_only def describe_user_roles(self, context, user, project=None, **kwargs): @@ -113,7 +114,8 @@ class AdminController(object): Omitting project will return any global roles that the user has. Specifying project will return only project specific roles. 
""" - return manager.AuthManager().get_user_roles(user, project=project) + roles = manager.AuthManager().get_user_roles(user, project=project) + return { 'roles': [{'role': r} for r in roles]} @admin_only def modify_user_role(self, context, user, role, project=None, From 0ccd10283b922cb9822872b89713aad1a5da214e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 10 Aug 2010 21:51:18 -0700 Subject: [PATCH 056/101] support a hostname that can be looked up --- bin/nova-dhcpbridge | 6 ++---- nova/compute/model.py | 10 +++++++++- nova/endpoint/cloud.py | 6 +++--- nova/network/linux_net.py | 21 +++++++++++---------- 4 files changed, 25 insertions(+), 18 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 6a9115fcbd..0dac2672a1 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -69,10 +69,8 @@ def init_leases(interface): """Get the list of hosts for an interface.""" net = model.get_network_by_interface(interface) res = "" - for host_name in net.hosts: - res += "%s\n" % linux_net.host_dhcp(net, - host_name, - net.hosts[host_name]) + for fixed_ip in net.hosts: + res += "%s\n" % linux_net.host_dhcp(fixed_ip, net.hosts[fixed_ip]) return res diff --git a/nova/compute/model.py b/nova/compute/model.py index 266a93b9a3..94fe43c1a1 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -123,7 +123,15 @@ class Instance(datastore.BasicModel): 'node_name': 'unassigned', 'project_id': 'unassigned', 'user_id': 'unassigned', - 'private_dns_name': 'unassigned'} + 'private_dns_name': 'unassigned', + 'hostname': self.instance_id} + + + @property + def hostname(self): + # NOTE(vish): this is to be backward compatible with instances that may + # not have been created with a hostname + return self.get('hostname', self.instance_id) @property def identifier(self): diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 02969c8e9e..26071cfeda 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -137,17 +137,17 @@ 
class CloudController(object): 'root': '/dev/sda1', 'swap': 'sda3' }, - 'hostname': i['private_dns_name'], # is this public sometimes? + 'hostname': i.hostname, 'instance-action': 'none', 'instance-id': i['instance_id'], 'instance-type': i.get('instance_type', ''), - 'local-hostname': i['private_dns_name'], + 'local-hostname': i.hostname, 'local-ipv4': i['private_dns_name'], # TODO: switch to IP 'kernel-id': i.get('kernel_id', ''), 'placement': { 'availaibility-zone': i.get('availability_zone', 'nova'), }, - 'public-hostname': i.get('dns_name', ''), + 'public-hostname': i.hostname, 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 0e8ddcc6a2..8a8fff2258 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -27,6 +27,7 @@ import os from nova import flags from nova import utils +from nova.compute import model FLAGS = flags.FLAGS @@ -125,12 +126,14 @@ def _dnsmasq_cmd(net): return ''.join(cmd) -def host_dhcp(network, host, mac): - """Return a host string for a network, host, and mac""" - # Logically, the idx of instances they've launched in this net - idx = host.split(".")[-1] - return "%s,%s-%s-%s.novalocal,%s" % \ - (mac, network['user_id'], network['vlan'], idx, host) +def host_dhcp(fixed_ip, mac): + """Return a host string for a fixed_ip and mac""" + instance = model.InstanceDirectory().by_ip(fixed_ip) + if instance is None: + hostname = 'ip-%s' % fixed_ip.replace('.', '-') + else: + hostname = instance.hostname + return "%s,%s.novalocal,%s" % (mac, hostname, fixed_ip) # TODO(ja): if the system has restarted or pid numbers have wrapped @@ -145,10 +148,8 @@ def start_dnsmasq(network): signal causing it to reload, otherwise spawn a new instance """ with open(dhcp_file(network['vlan'], 'conf'), 'w') as f: - for host_name in network.hosts: - f.write("%s\n" % host_dhcp(network, - host_name, - 
network.hosts[host_name])) + for fixed_ip in network.hosts: + f.write("%s\n" % host_dhcp(fixed_ip, network.hosts[fixed_ip])) pid = dnsmasq_pid_for(network) From 24f8cb89f8b92563d364186b80c7d73d28b26bea Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 11 Aug 2010 01:20:21 -0700 Subject: [PATCH 057/101] Actually pass in hostname and create a proper model for data in network code --- bin/nova-dhcpbridge | 4 +- nova/compute/model.py | 10 +- nova/datastore.py | 12 ++- nova/endpoint/cloud.py | 18 +++- nova/network/linux_net.py | 20 ++-- nova/network/model.py | 181 ++++++++++++++++----------------- nova/network/service.py | 26 +++-- nova/tests/network_unittest.py | 7 +- 8 files changed, 145 insertions(+), 133 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 0dac2672a1..b1ad1c8fef 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -69,8 +69,8 @@ def init_leases(interface): """Get the list of hosts for an interface.""" net = model.get_network_by_interface(interface) res = "" - for fixed_ip in net.hosts: - res += "%s\n" % linux_net.host_dhcp(fixed_ip, net.hosts[fixed_ip]) + for address in net.address_objs: + res += "%s\n" % linux_net.host_dhcp(address) return res diff --git a/nova/compute/model.py b/nova/compute/model.py index 94fe43c1a1..266a93b9a3 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -123,15 +123,7 @@ class Instance(datastore.BasicModel): 'node_name': 'unassigned', 'project_id': 'unassigned', 'user_id': 'unassigned', - 'private_dns_name': 'unassigned', - 'hostname': self.instance_id} - - - @property - def hostname(self): - # NOTE(vish): this is to be backward compatible with instances that may - # not have been created with a hostname - return self.get('hostname', self.instance_id) + 'private_dns_name': 'unassigned'} @property def identifier(self): diff --git a/nova/datastore.py b/nova/datastore.py index 51ef7a7581..926e41f67f 100644 --- a/nova/datastore.py +++ b/nova/datastore.py @@ -124,12 
+124,16 @@ class BasicModel(object): yield cls(identifier) @classmethod - @absorb_connection_error def associated_to(cls, foreign_type, foreign_id): - redis_set = cls._redis_association_name(foreign_type, foreign_id) - for identifier in Redis.instance().smembers(redis_set): + for identifier in cls.associated_keys(foreign_type, foreign_id): yield cls(identifier) + @classmethod + @absorb_connection_error + def associated_keys(cls, foreign_type, foreign_id): + redis_set = cls._redis_association_name(foreign_type, foreign_id) + return Redis.instance().smembers(redis_set) or [] + @classmethod def _redis_set_name(cls, kls_name): # stupidly pluralize (for compatiblity with previous codebase) @@ -138,7 +142,7 @@ class BasicModel(object): @classmethod def _redis_association_name(cls, foreign_type, foreign_id): return cls._redis_set_name("%s:%s:%s" % - (foreign_type, foreign_id, cls.__name__)) + (foreign_type, foreign_id, cls._redis_name())) @property def identifier(self): diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 26071cfeda..c79e96f5dd 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -125,6 +125,12 @@ class CloudController(object): } else: keys = '' + + address_record = network_model.Address(i['private_dns_name']) + if address_record: + hostname = address_record['hostname'] + else: + hostname = 'ip-%s' % i['private_dns_name'].replace('.', '-') data = { 'user-data': base64.b64decode(i['user_data']), 'meta-data': { @@ -137,17 +143,17 @@ class CloudController(object): 'root': '/dev/sda1', 'swap': 'sda3' }, - 'hostname': i.hostname, + 'hostname': hostname, 'instance-action': 'none', 'instance-id': i['instance_id'], 'instance-type': i.get('instance_type', ''), - 'local-hostname': i.hostname, + 'local-hostname': hostname, 'local-ipv4': i['private_dns_name'], # TODO: switch to IP 'kernel-id': i.get('kernel_id', ''), 'placement': { 'availaibility-zone': i.get('availability_zone', 'nova'), }, - 'public-hostname': i.hostname, + 
'public-hostname': hostname, 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), @@ -563,14 +569,15 @@ class CloudController(object): is_vpn = False if image_id == FLAGS.vpn_image_id: is_vpn = True + inst = self.instdir.new() allocate_result = yield rpc.call(network_topic, {"method": "allocate_fixed_ip", "args": {"user_id": context.user.id, "project_id": context.project.id, "security_group": security_group, - "is_vpn": is_vpn}}) + "is_vpn": is_vpn, + "hostname": inst.instance_id}}) allocate_data = allocate_result['result'] - inst = self.instdir.new() inst['image_id'] = image_id inst['kernel_id'] = kernel_id inst['ramdisk_id'] = ramdisk_id @@ -584,6 +591,7 @@ class CloudController(object): inst['project_id'] = context.project.id inst['ami_launch_index'] = num inst['security_group'] = security_group + inst['hostname'] = inst.instance_id for (key, value) in allocate_data.iteritems(): inst[key] = value diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 8a8fff2258..4ebc2097b0 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
@@ -27,7 +25,6 @@ import os from nova import flags from nova import utils -from nova.compute import model FLAGS = flags.FLAGS @@ -126,14 +123,11 @@ def _dnsmasq_cmd(net): return ''.join(cmd) -def host_dhcp(fixed_ip, mac): - """Return a host string for a fixed_ip and mac""" - instance = model.InstanceDirectory().by_ip(fixed_ip) - if instance is None: - hostname = 'ip-%s' % fixed_ip.replace('.', '-') - else: - hostname = instance.hostname - return "%s,%s.novalocal,%s" % (mac, hostname, fixed_ip) +def host_dhcp(address): + """Return a host string for an address object""" + return "%s,%s.novalocal,%s" % (address['mac'], + address['hostname'], + address.address) # TODO(ja): if the system has restarted or pid numbers have wrapped @@ -148,8 +142,8 @@ def start_dnsmasq(network): signal causing it to reload, otherwise spawn a new instance """ with open(dhcp_file(network['vlan'], 'conf'), 'w') as f: - for fixed_ip in network.hosts: - f.write("%s\n" % host_dhcp(fixed_ip, network.hosts[fixed_ip])) + for address in network.assigned_objs: + f.write("%s\n" % host_dhcp(address)) pid = dnsmasq_pid_for(network) diff --git a/nova/network/model.py b/nova/network/model.py index 7b1e16f261..ce93450674 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -143,13 +143,64 @@ class Vlan(datastore.BasicModel): network[start + FLAGS.network_size - 1]) +class Address(datastore.BasicModel): + """Represents a fixed ip in the datastore""" + override_type = "address" + + def __init__(self, address): + self.address = address + super(Address, self).__init__() + + @property + def identifier(self): + return self.address + + def default_state(self): + return {'address': self.address} + + @classmethod + # pylint: disable=R0913 + def create(cls, user_id, project_id, address, mac, hostname, network_id): + """Creates an Address object""" + addr = cls(address) + addr['user_id'] = user_id + addr['project_id'] = project_id + addr['mac'] = mac + if hostname is None: + hostname = "ip-%s" % 
address.replace('.', '-') + addr['hostname'] = hostname + addr['network_id'] = network_id + addr.save() + return addr + + def save(self): + is_new = self.is_new_record() + success = super(Address, self).save() + if success and is_new: + self.associate_with("network", self['network_id']) + + def destroy(self): + self.unassociate_with("network", self['network_id']) + super(Address, self).destroy() + + +class PublicAddress(Address): + """Represents an elastic ip in the datastore""" + override_type = "address" + + def default_state(self): + return {'address': self.address, + 'instance_id': 'available', + 'private_ip': 'available'} + + # CLEANUP: -# TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients # TODO(ja): does vlanpool "keeper" need to know the min/max - # shouldn't FLAGS always win? class BaseNetwork(datastore.BasicModel): """Implements basic logic for allocating ips in a network""" override_type = 'network' + address_class = Address @property def identifier(self): @@ -214,28 +265,31 @@ class BaseNetwork(datastore.BasicModel): """Returns the project associated with this network""" return manager.AuthManager().get_project(self['project_id']) - @property - def _hosts_key(self): - """Datastore key where hosts are stored""" - return "network:%s:hosts" % (self['network_str']) - - @property - def hosts(self): - """Returns a hash of all hosts allocated in this network""" - return datastore.Redis.instance().hgetall(self._hosts_key) or {} - - def _add_host(self, _user_id, _project_id, host, target): + # pylint: disable=R0913 + def _add_host(self, user_id, project_id, ip_address, mac, hostname): """Add a host to the datastore""" - datastore.Redis.instance().hset(self._hosts_key, host, target) + Address.create(user_id, project_id, ip_address, + mac, hostname, self.identifier) - def _rem_host(self, host): + def _rem_host(self, ip_address): """Remove a host from the datastore""" - datastore.Redis.instance().hdel(self._hosts_key, host) + 
Address(ip_address).destroy() @property def assigned(self): - """Returns a list of all assigned keys""" - return datastore.Redis.instance().hkeys(self._hosts_key) + """Returns a list of all assigned addresses""" + return self.address_class.associated_keys('network', self.identifier) + + @property + def assigned_objs(self): + """Returns a list of all assigned addresses as objects""" + return self.address_class.associated_to('network', self.identifier) + + def get_address(self, ip_address): + """Returns a specific ip as an object""" + if ip_address in self.assigned: + return self.address_class(ip_address) + return None @property def available(self): @@ -243,7 +297,7 @@ class BaseNetwork(datastore.BasicModel): for idx in range(self.num_bottom_reserved_ips, len(self.network) - self.num_top_reserved_ips): address = str(self.network[idx]) - if not address in self.hosts.keys(): + if not address in self.assigned: yield address @property @@ -256,11 +310,11 @@ class BaseNetwork(datastore.BasicModel): """Returns number of ips reserved at the top of the range""" return 1 # Broadcast - def allocate_ip(self, user_id, project_id, mac): + def allocate_ip(self, user_id, project_id, mac, hostname=None): """Allocates an ip to a mac address""" for address in self.available: logging.debug("Allocating IP %s to %s", address, project_id) - self._add_host(user_id, project_id, address, mac) + self._add_host(user_id, project_id, address, mac, hostname) self.express(address=address) return address raise exception.NoMoreAddresses("Project %s with network %s" % @@ -287,11 +341,6 @@ class BaseNetwork(datastore.BasicModel): # dnsmasq to confirm that it has been released. logging.debug("Deallocating allocated IP %s", ip_str) - def list_addresses(self): - """List all allocated addresses""" - for address in self.hosts: - yield address - def express(self, address=None): """Set up network. 
Implemented in subclasses""" pass @@ -383,10 +432,10 @@ class DHCPNetwork(BridgedNetwork): logging.debug("Not launching dnsmasq: no hosts.") self.express_vpn() - def allocate_vpn_ip(self, user_id, project_id, mac): + def allocate_vpn_ip(self, user_id, project_id, mac, hostname=None): """Allocates the reserved ip to a vpn instance""" address = str(self.network[2]) - self._add_host(user_id, project_id, address, mac) + self._add_host(user_id, project_id, address, mac, hostname) self.express(address=address) return address @@ -407,40 +456,13 @@ class DHCPNetwork(BridgedNetwork): else: linux_net.start_dnsmasq(self) - -class PublicAddress(datastore.BasicModel): - """Represents an elastic ip in the datastore""" - override_type = "address" - - def __init__(self, address): - self.address = address - super(PublicAddress, self).__init__() - - @property - def identifier(self): - return self.address - - def default_state(self): - return {'address': self.address} - - @classmethod - def create(cls, user_id, project_id, address): - """Creates a PublicAddress object""" - addr = cls(address) - addr['user_id'] = user_id - addr['project_id'] = project_id - addr['instance_id'] = 'available' - addr['private_ip'] = 'available' - addr.save() - return addr - - DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] class PublicNetworkController(BaseNetwork): """Handles elastic ips""" override_type = 'network' + address_class = PublicAddress def __init__(self, *args, **kwargs): network_id = "public:default" @@ -454,26 +476,6 @@ class PublicNetworkController(BaseNetwork): self.save() self.express() - @property - def host_objs(self): - """Returns assigned addresses as PublicAddress objects""" - for address in self.assigned: - yield PublicAddress(address) - - def get_host(self, public_ip): - """Returns a specific public ip as PublicAddress object""" - if public_ip in self.assigned: - return PublicAddress(public_ip) - return None - - def _add_host(self, user_id, project_id, 
host, _target): - datastore.Redis.instance().hset(self._hosts_key, host, project_id) - PublicAddress.create(user_id, project_id, host) - - def _rem_host(self, host): - PublicAddress(host).destroy() - datastore.Redis.instance().hdel(self._hosts_key, host) - def deallocate_ip(self, ip_str): # NOTE(vish): cleanup is now done on release by the parent class self.release_ip(ip_str) @@ -483,10 +485,10 @@ class PublicNetworkController(BaseNetwork): if not public_ip in self.assigned: raise exception.AddressNotAllocated() # TODO(josh): Keep an index going both ways - for addr in self.host_objs: + for addr in self.assigned_objs: if addr.get('private_ip', None) == private_ip: raise exception.AddressAlreadyAssociated() - addr = self.get_host(public_ip) + addr = self.get_address(public_ip) if addr.get('private_ip', 'available') != 'available': raise exception.AddressAlreadyAssociated() addr['private_ip'] = private_ip @@ -498,7 +500,7 @@ class PublicNetworkController(BaseNetwork): """Disassociates a public ip with its private ip""" if not public_ip in self.assigned: raise exception.AddressNotAllocated() - addr = self.get_host(public_ip) + addr = self.get_address(public_ip) if addr.get('private_ip', 'available') == 'available': raise exception.AddressNotAssociated() self.deexpress(address=public_ip) @@ -507,9 +509,12 @@ class PublicNetworkController(BaseNetwork): addr.save() def express(self, address=None): - addresses = self.host_objs if address: - addresses = [self.get_host(address)] + if not address in self.assigned: + raise exception.AddressNotAllocated() + addresses = [self.get_address(address)] + else: + addresses = self.assigned_objs for addr in addresses: if addr.get('private_ip', 'available') == 'available': continue @@ -529,7 +534,7 @@ class PublicNetworkController(BaseNetwork): % (private_ip, protocol, port)) def deexpress(self, address=None): - addr = self.get_host(address) + addr = self.get_address(address) private_ip = addr['private_ip'] 
linux_net.unbind_public_ip(address, FLAGS.public_interface) linux_net.remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s" @@ -592,16 +597,10 @@ def get_project_network(project_id, security_group='default'): def get_network_by_address(address): """Gets the network for a given private ip""" - # TODO(vish): This is completely the wrong way to do this, but - # I'm getting the network binary working before I - # tackle doing this the right way. - logging.debug("Get Network By Address: %s", address) - for project in manager.AuthManager().get_projects(): - net = get_project_network(project.id) - if address in net.assigned: - logging.debug("Found %s in %s", address, project.id) - return net - raise exception.AddressNotAllocated() + address_record = Address.lookup(address) + if not address_record: + raise exception.AddressNotAllocated() + return get_project_network(address_record['project_id']) def get_network_by_interface(iface, security_group='default'): diff --git a/nova/network/service.py b/nova/network/service.py index fd45496c9f..9c0f5520bf 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -152,7 +152,9 @@ class FlatNetworkService(BaseNetworkService): """Network is created manually""" pass - def allocate_fixed_ip(self, user_id, project_id, + def allocate_fixed_ip(self, + user_id, + project_id, security_group='default', *args, **kwargs): """Gets a fixed ip from the pool @@ -161,7 +163,7 @@ class FlatNetworkService(BaseNetworkService): """ # NOTE(vish): Some automation could be done here. For example, # creating the flat_network_bridge and setting up - # a gateway. This is all done manually atm + # a gateway. This is all done manually atm. 
redis = datastore.Redis.instance() if not redis.exists('ips') and not len(redis.keys('instances:*')): for fixed_ip in FLAGS.flat_network_ips: @@ -169,6 +171,8 @@ class FlatNetworkService(BaseNetworkService): fixed_ip = redis.spop('ips') if not fixed_ip: raise exception.NoMoreAddresses() + # TODO(vish): some sort of dns handling for hostname should + # probably be done here. return {'inject_network': True, 'network_type': FLAGS.network_type, 'mac_address': utils.generate_mac(), @@ -192,16 +196,26 @@ class VlanNetworkService(BaseNetworkService): # to support vlans separately from dhcp, instead of having # both of them together in this class. # pylint: disable=W0221 - def allocate_fixed_ip(self, user_id, project_id, + def allocate_fixed_ip(self, + user_id, + project_id, security_group='default', - is_vpn=False, *args, **kwargs): + is_vpn=False, + hostname=None, + *args, **kwargs): """Gets a fixed ip from the pool""" mac = utils.generate_mac() net = model.get_project_network(project_id) if is_vpn: - fixed_ip = net.allocate_vpn_ip(user_id, project_id, mac) + fixed_ip = net.allocate_vpn_ip(user_id, + project_id, + mac, + hostname) else: - fixed_ip = net.allocate_ip(user_id, project_id, mac) + fixed_ip = net.allocate_ip(user_id, + project_id, + mac, + hostname) return {'network_type': FLAGS.network_type, 'bridge_name': net['bridge_name'], 'mac_address': mac, diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 5671a88868..0395098092 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -202,8 +202,8 @@ class NetworkTestCase(test.TrialTestCase): secondmac = result['mac_address'] secondaddress = result['private_dns_name'] self.assertEqual(address, secondaddress) - self.service.deallocate_fixed_ip(secondaddress) issue_ip(secondmac, secondaddress, hostname, net.bridge_name) + self.service.deallocate_fixed_ip(secondaddress) release_ip(secondmac, secondaddress, hostname, net.bridge_name) def test_available_ips(self): 
@@ -218,7 +218,7 @@ class NetworkTestCase(test.TrialTestCase): services (network, gateway, CloudPipe, broadcast) """ net = model.get_project_network(self.projects[0].id, "default") - num_preallocated_ips = len(net.hosts.keys()) + num_preallocated_ips = len(net.assigned) net_size = flags.FLAGS.network_size num_available_ips = net_size - (net.num_bottom_reserved_ips + num_preallocated_ips + @@ -254,7 +254,7 @@ class NetworkTestCase(test.TrialTestCase): def is_in_project(address, project_id): """Returns true if address is in specified project""" - return address in model.get_project_network(project_id).list_addresses() + return address in model.get_project_network(project_id).assigned def binpath(script): @@ -272,6 +272,7 @@ def issue_ip(mac, private_ip, hostname, interface): (out, err) = utils.execute(cmd, addl_env=env) logging.debug("ISSUE_IP: %s, %s ", out, err) + def release_ip(mac, private_ip, hostname, interface): """Run del command on dhcpbridge""" cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'), From 1637c33927672a6edc9ad7a994787669ea47f602 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 11 Aug 2010 09:46:08 -0400 Subject: [PATCH 058/101] Serializing in middleware after all... by tying to the router. maybe a good idea? --- nova/wsgi.py | 119 ++++++++++++++++++++++++++++++++------------------- 1 file changed, 75 insertions(+), 44 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 2716481058..c511a3f068 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -141,15 +141,24 @@ class ParsedRoutes(Middleware): app = environ['wsgiorg.routing_args'][1]['controller'] return app(environ, start_response) -class MichaelRouter(object): +class MichaelRouterMiddleware(object): """ - My attempt at a routing class. Just override __init__ to call - super, then set up routes in self.map. + Router that maps incoming requests to WSGI apps or to standard + controllers+actions. 
The response will be a WSGI response; standard + controllers+actions will by default have their results serialized + to the requested Content Type, or you can subclass and override + _to_webob_response to customize this. """ - def __init__(self): - self.map = routes.Mapper() - self._router = routes.middleware.RoutesMiddleware(self._proceed, self.map) + def __init__(self, map): + """ + Create a router for the given routes.Mapper. It may contain standard + routes (i.e. specifying controllers and actions), or may route to a + WSGI app by instead specifying a wsgi_app=SomeApp() parameter in + map.connect(). + """ + self.map = map + self._router = routes.middleware.RoutesMiddleware(self.__proceed, self.map) @webob.dec.wsgify def __call__(self, req): @@ -160,62 +169,84 @@ class MichaelRouter(object): return self._router @webob.dec.wsgify - def _proceed(self, req): - """ - Called by self._router after matching the incoming request to a route - and putting the information into req.environ. - """ + @staticmethod + def __proceed(req): + # Called by self._router after matching the incoming request to a route + # and putting the information into req.environ. Either returns 404, the + # routed WSGI app, or _to_webob_response(the action result). 
+ if req.environ['routes.route'] is None: return webob.exc.HTTPNotFound() match = environ['wsgiorg.routing_args'][1] - if match.get('_is_wsgi', False): - wsgiapp = match['controller'] - return req.get_response(wsgiapp) + if 'wsgi_app' in match: + return match['wsgi_app'] else: - # TODO(gundlach): doubt this is the right way -- and it really - # feels like this code should exist somewhere already on the - # internet + kwargs = match.copy() controller, action = match['controller'], match['action'] - delete match['controller'] - delete match['action'] - return _as_response(getattr(controller, action)(**match)) + delete kwargs['controller'] + delete kwargs['action'] + return _to_webob_response(req, getattr(controller, action)(**kwargs)) - controller = environ['wsgiorg.routing_args'][1]['controller'] - self._dispatch(controller) - - def _as_response(self, result): + def _to_webob_response(self, req, result): """ - When routing to a non-wsgi controller+action, its result will - be passed here before returning up the WSGI chain to be converted - into a webob.Response + When routing to a non-WSGI controller+action, the webob.Request and the + action's result will be passed here to be converted into a + webob.Response before returning up the WSGI chain. By default it + serializes to the requested Content Type. + """ + return Serializer(req).serialize(result) + +class Serializer(object): + """ + Serializes a dictionary to a Content Type specified by a WSGI environment. 
+ """ + + def __init__(self, environ): + """Create a serializer based on the given WSGI environment.""" + self.environ = environ + + def serialize(self, data): + req = webob.Request(environ) + # TODO(gundlach): temp + if 'applicatio/json' in req.accept): + import json + return json.dumps(result) + else: + return '' + repr(data) + '' - - - -class ApiVersionRouter(MichaelRouter): +class ApiVersionRouter(MichaelRouterMiddleware): def __init__(self): - super(ApiVersionRouter, self).__init__(self) + map = routes.Mapper() - self.map.connect(None, "/v1.0/{path_info:.*}", controller=RsApiRouter()) - self.map.connect(None, "/ec2/{path_info:.*}", controller=Ec2ApiRouter()) + map.connect(None, "/v1.0/{path_info:.*}", wsgi_app=RsApiRouter()) + map.connect(None, "/ec2/{path_info:.*}", wsgi_app=Ec2ApiRouter()) -class RsApiRouter(MichaelRouter): + super(ApiVersionRouter, self).__init__(self, map) + +class RsApiRouter(MichaelRouterMiddleware): def __init__(self): - super(RsApiRouter, self).__init__(self) + map = routes.Mapper() - self.map.resource("server", "servers", controller=CloudServersServerApi()) - self.map.resource("image", "images", controller=CloudServersImageApi()) - self.map.resource("flavor", "flavors", controller=CloudServersFlavorApi()) - self.map.resource("sharedipgroup", "sharedipgroups", - controller=CloudServersSharedIpGroupApi()) + map.resource("server", "servers", controller=ServerController()) + map.resource("image", "images", controller=ImageController()) + map.resource("flavor", "flavors", controller=FlavorController()) + map.resource("sharedipgroup", "sharedipgroups", + controller=SharedIpGroupController()) + + super(RsApiRouter, self).__init__(self, map) class Ec2ApiRouter(object): + @webob.dec.wsgify + def __call__(self, req): + return 'dummy response' + +class ServerController(object): def __getattr__(self, key): - return lambda *x: {'dummy response': 'i am a dummy response'} -CloudServersServerApi = CloudServersImageApi = CloudServersFlavorApi = \ - 
CloudServersSharedIpGroupApi = Ec2ApiRouter + return {'dummy': 'dummy response'} +ImageController = FlavorController = SharedIpGroupController = ServerController + class Router(Middleware): # pylint: disable-msg=R0921 """Wrapper to help setup routes.middleware.RoutesMiddleware.""" From a0fb0fdf1e899488f0717bea6ee2cad58120070b Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 11 Aug 2010 14:46:43 -0400 Subject: [PATCH 059/101] Working router that can target WSGI middleware or a standard controller+action --- nova/wsgi.py | 211 ++++++++++++++++++++++++--------------------------- 1 file changed, 101 insertions(+), 110 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index c511a3f068..81890499ec 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -29,6 +29,8 @@ import eventlet.wsgi eventlet.patcher.monkey_patch(all=False, socket=True) import routes import routes.middleware +import webob.dec +import webob.exc logging.getLogger("routes.middleware").addHandler(logging.StreamHandler()) @@ -89,75 +91,80 @@ class Middleware(Application): # pylint: disable-msg=W0223 class Debug(Middleware): - """Helper class that can be insertd into any WSGI application chain + """Helper class that can be inserted into any WSGI application chain to get information about the request and response.""" - def __call__(self, environ, start_response): - for key, value in environ.items(): + @webob.dec.wsgify + def __call__(self, req): + print ("*" * 40) + " REQUEST ENVIRON" + for key, value in req.environ.items(): print key, "=", value print - wrapper = debug_start_response(start_response) - return debug_print_body(self.application(environ, wrapper)) + resp = req.get_response(self.application) - -def debug_start_response(start_response): - """Wrap the start_response to capture when called.""" - - def wrapper(status, headers, exc_info=None): - """Print out all headers when start_response is called.""" - print status - for (key, value) in headers: + print ("*" * 40) + " RESPONSE HEADERS" + 
for (key, value) in resp.headers: print key, "=", value print - start_response(status, headers, exc_info) - return wrapper + resp.app_iter = self.print_generator(resp.app_iter) + + return resp + + @staticmethod + def print_generator(app_iter): + """ + Iterator that prints the contents of a wrapper string iterator + when iterated. + """ + print ("*" * 40) + "BODY" + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print -def debug_print_body(body): - """Print the body of the response as it is sent back.""" - - class Wrapper(object): - """Iterate through all the body parts and print before returning.""" - - def __iter__(self): - for part in body: - sys.stdout.write(part) - sys.stdout.flush() - yield part - print - - return Wrapper() - - -class ParsedRoutes(Middleware): - """Processed parsed routes from routes.middleware.RoutesMiddleware - and call either the controller if found or the default application - otherwise.""" - - def __call__(self, environ, start_response): - if environ['routes.route'] is None: - return self.application(environ, start_response) - app = environ['wsgiorg.routing_args'][1]['controller'] - return app(environ, start_response) - -class MichaelRouterMiddleware(object): +class Router(object): """ - Router that maps incoming requests to WSGI apps or to standard - controllers+actions. The response will be a WSGI response; standard - controllers+actions will by default have their results serialized - to the requested Content Type, or you can subclass and override - _to_webob_response to customize this. + WSGI middleware that maps incoming requests to targets. + + Non-WSGI-app targets have their results converted to a WSGI response + automatically -- by default, they are serialized according to the Content + Type from the request. This behavior can be changed by overriding + _to_webob_response(). """ - def __init__(self, map): + def __init__(self, map, targets): """ - Create a router for the given routes.Mapper. 
It may contain standard - routes (i.e. specifying controllers and actions), or may route to a - WSGI app by instead specifying a wsgi_app=SomeApp() parameter in - map.connect(). + Create a router for the given routes.Mapper `map`. + + Each route in `map` must contain either + - a 'wsgi_app' string or + - a 'controller' string and an 'action' string. + + 'wsgi_app' is a key into the `target` dictionary whose value + is a WSGI app. 'controller' is a key into `target' whose value is + a class instance containing the method specified by 'action'. + + Examples: + map = routes.Mapper() + targets = { "servers": ServerController(), "blog": BlogWsgiApp() } + + # Explicit mapping of one route to a controller+action + map.connect(None, "/serverlist", controller="servers", action="list") + + # Controller string is implicitly equal to 2nd param here, and + # actions are all implicitly defined + map.resource("server", "servers") + + # Pointing to a WSGI app. You'll need to specify the {path_info:.*} + # parameter so the target app can work with just his section of the + # URL. + map.connect(None, "/v1.0/{path_info:.*}", wsgi_app="blog") """ self.map = map + self.targets = targets self._router = routes.middleware.RoutesMiddleware(self.__proceed, self.map) @webob.dec.wsgify @@ -169,23 +176,28 @@ class MichaelRouterMiddleware(object): return self._router @webob.dec.wsgify - @staticmethod - def __proceed(req): + def __proceed(self, req): # Called by self._router after matching the incoming request to a route # and putting the information into req.environ. Either returns 404, the # routed WSGI app, or _to_webob_response(the action result). 
if req.environ['routes.route'] is None: return webob.exc.HTTPNotFound() - match = environ['wsgiorg.routing_args'][1] + match = req.environ['wsgiorg.routing_args'][1] if 'wsgi_app' in match: - return match['wsgi_app'] + app_name = match['wsgi_app'] + app = self.targets[app_name] + return app else: kwargs = match.copy() - controller, action = match['controller'], match['action'] - delete kwargs['controller'] - delete kwargs['action'] - return _to_webob_response(req, getattr(controller, action)(**kwargs)) + controller_name, action = match['controller'], match['action'] + del kwargs['controller'] + del kwargs['action'] + + controller = self.targets[controller_name] + method = getattr(controller, action) + result = method(**kwargs) + return self._to_webob_response(req, result) def _to_webob_response(self, req, result): """ @@ -194,7 +206,8 @@ class MichaelRouterMiddleware(object): webob.Response before returning up the WSGI chain. By default it serializes to the requested Content Type. """ - return Serializer(req).serialize(result) + return Serializer(req.environ).serialize(result) + class Serializer(object): """ @@ -206,75 +219,53 @@ class Serializer(object): self.environ = environ def serialize(self, data): - req = webob.Request(environ) + req = webob.Request(self.environ) # TODO(gundlach): temp - if 'applicatio/json' in req.accept): + if req.accept and 'application/json' in req.accept: import json - return json.dumps(result) + return json.dumps(data) else: return '' + repr(data) + '' -class ApiVersionRouter(MichaelRouterMiddleware): +class ApiVersionRouter(Router): def __init__(self): map = routes.Mapper() - map.connect(None, "/v1.0/{path_info:.*}", wsgi_app=RsApiRouter()) - map.connect(None, "/ec2/{path_info:.*}", wsgi_app=Ec2ApiRouter()) + map.connect(None, "/v1.0/{path_info:.*}", wsgi_app="rs") + map.connect(None, "/ec2/{path_info:.*}", wsgi_app="ec2") - super(ApiVersionRouter, self).__init__(self, map) + targets = { "rs": RsApiRouter(), "ec2": Ec2ApiRouter() } 
-class RsApiRouter(MichaelRouterMiddleware): + super(ApiVersionRouter, self).__init__(map, targets) + +class RsApiRouter(Router): def __init__(self): map = routes.Mapper() - map.resource("server", "servers", controller=ServerController()) - map.resource("image", "images", controller=ImageController()) - map.resource("flavor", "flavors", controller=FlavorController()) - map.resource("sharedipgroup", "sharedipgroups", - controller=SharedIpGroupController()) + map.resource("server", "servers") + map.resource("image", "images") + map.resource("flavor", "flavors") + map.resource("sharedipgroup", "sharedipgroups") - super(RsApiRouter, self).__init__(self, map) + targets = { + 'servers': ServerController(), + 'images': ImageController(), + 'flavors': FlavorController(), + 'sharedipgroups': SharedIpGroupController() + } + super(RsApiRouter, self).__init__(map, targets) + +# TODO(gundlach): temp class Ec2ApiRouter(object): @webob.dec.wsgify def __call__(self, req): return 'dummy response' - +# TODO(gundlach): temp class ServerController(object): def __getattr__(self, key): - return {'dummy': 'dummy response'} + return lambda **args: {key: 'dummy response for %s' % repr(args)} +# TODO(gundlach): temp ImageController = FlavorController = SharedIpGroupController = ServerController - - -class Router(Middleware): # pylint: disable-msg=R0921 - """Wrapper to help setup routes.middleware.RoutesMiddleware.""" - - def __init__(self, application): - self.map = routes.Mapper() - self._build_map() - application = ParsedRoutes(application) - application = routes.middleware.RoutesMiddleware(application, self.map) - super(Router, self).__init__(application) - - def __call__(self, environ, start_response): - return self.application(environ, start_response) - - def _build_map(self): - """Method to create new connections for the routing map.""" - raise NotImplementedError("You must implement _build_map") - - def _connect(self, *args, **kwargs): - """Wrapper for the map.connect method.""" - 
self.map.connect(*args, **kwargs) - - -def route_args(application): - """Decorator to make grabbing routing args more convenient.""" - - def wrapper(self, req): - """Call application with req and parsed routing args from.""" - return application(self, req, req.environ['wsgiorg.routing_args'][1]) - - return wrapper From 2e753b033dae6270674c0397be8e01bd2ff47980 Mon Sep 17 00:00:00 2001 From: Matthew Dietz Date: Wed, 11 Aug 2010 15:27:27 -0500 Subject: [PATCH 060/101] Prototype implementation of Servers controller --- nova/endpoint/{ => aws}/cloud.py | 0 nova/endpoint/{ => aws}/images.py | 0 nova/endpoint/rackspace/controllers/base.py | 9 +++ .../endpoint/rackspace/controllers/flavors.py | 0 nova/endpoint/rackspace/controllers/images.py | 0 .../endpoint/rackspace/controllers/servers.py | 72 +++++++++++++++++++ .../rackspace/controllers/shared_ip_groups.py | 0 nova/endpoint/{ => rackspace}/rackspace.py | 27 ++++--- 8 files changed, 93 insertions(+), 15 deletions(-) rename nova/endpoint/{ => aws}/cloud.py (100%) rename nova/endpoint/{ => aws}/images.py (100%) create mode 100644 nova/endpoint/rackspace/controllers/base.py create mode 100644 nova/endpoint/rackspace/controllers/flavors.py create mode 100644 nova/endpoint/rackspace/controllers/images.py create mode 100644 nova/endpoint/rackspace/controllers/servers.py create mode 100644 nova/endpoint/rackspace/controllers/shared_ip_groups.py rename nova/endpoint/{ => rackspace}/rackspace.py (89%) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/aws/cloud.py similarity index 100% rename from nova/endpoint/cloud.py rename to nova/endpoint/aws/cloud.py diff --git a/nova/endpoint/images.py b/nova/endpoint/aws/images.py similarity index 100% rename from nova/endpoint/images.py rename to nova/endpoint/aws/images.py diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py new file mode 100644 index 0000000000..a83925cc38 --- /dev/null +++ 
b/nova/endpoint/rackspace/controllers/base.py @@ -0,0 +1,9 @@ +class BaseController(object): + @classmethod + def render(cls, instance): + if isinstance(instance, list): + return [ cls.entity_name : { cls.render(instance) } + else + return + + diff --git a/nova/endpoint/rackspace/controllers/flavors.py b/nova/endpoint/rackspace/controllers/flavors.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nova/endpoint/rackspace/controllers/images.py b/nova/endpoint/rackspace/controllers/images.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/endpoint/rackspace/controllers/servers.py new file mode 100644 index 0000000000..af6c958bbf --- /dev/null +++ b/nova/endpoint/rackspace/controllers/servers.py @@ -0,0 +1,72 @@ +from nova import rpc +from nova.compute import model as compute +from nova.endpoint.rackspace import BaseController + +class ServersController(BaseController): + entity_name = 'servers' + + def __init__(self): + raise NotImplemented("You may not create an instance of this class") + + @classmethod + def index(cls): + return [instance_details(inst) for inst in compute.InstanceDirectory().all] + + @classmethod + def show(cls, **kwargs): + instance_id = kwargs['id'] + return compute.InstanceDirectory().get(instance_id) + + @classmethod + def delete(cls, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance + raise ServerNotFound("The requested server was not found") + instance.destroy() + return True + + @classmethod + def create(cls, **kwargs): + inst = self.build_server_instance(kwargs['server']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + @classmethod + def update(cls, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + 
instance.update(kwargs['server']) + instance.save() + + @classmethod + def build_server_instance(self, env): + """Build instance data structure and save it to the data store.""" + reservation = utils.generate_uid('r') + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = self.instdir.new() + inst['name'] = env['server']['name'] + inst['image_id'] = env['server']['imageId'] + inst['instance_type'] = env['server']['flavorId'] + inst['user_id'] = env['user']['id'] + inst['project_id'] = env['project']['id'] + inst['reservation_id'] = reservation + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + address = self.network.allocate_ip( + inst['user_id'], + inst['project_id'], + mac=inst['mac_address']) + inst['private_dns_name'] = str(address) + inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( + inst['user_id'], + inst['project_id'], + 'default')['bridge_name'] + # key_data, key_name, ami_launch_index + # TODO(todd): key data or root password + inst.save() + return inst diff --git a/nova/endpoint/rackspace/controllers/shared_ip_groups.py b/nova/endpoint/rackspace/controllers/shared_ip_groups.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace/rackspace.py similarity index 89% rename from nova/endpoint/rackspace.py rename to nova/endpoint/rackspace/rackspace.py index b4e6cd823e..75b828e91d 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace/rackspace.py @@ -45,20 +45,18 @@ class API(wsgi.Middleware): def __init__(self): super(API, self).__init__(Router(webob.exc.HTTPNotFound())) - @webob.dec.wsgify - def __call__(self, req): - return self.application + def __call__(self, environ, start_response): context = {} - if "HTTP_X_AUTH_TOKEN" in req.environ: + if "HTTP_X_AUTH_TOKEN" in environ: context['user'] = manager.AuthManager().get_user_from_access_key( - req.environ['HTTP_X_AUTH_TOKEN']) + environ['HTTP_X_AUTH_TOKEN']) if 
context['user']: context['project'] = manager.AuthManager().get_project( context['user'].name) if "user" not in context: - return webob.exc.HTTPForbidden() + return webob.exc.HTTPForbidden()(environ, start_response) environ['nova.context'] = context - return self.application + return self.application(environ, start_response) class Router(wsgi.Router): @@ -66,14 +64,13 @@ class Router(wsgi.Router): def _build_map(self): """Build routing map for authentication and cloud.""" - self.map.resource("server", "servers", controller=CloudServerAPI()) - #self._connect("/v1.0", controller=AuthenticationAPI()) - #cloud = CloudServerAPI() - #self._connect("/servers", controller=cloud.launch_server, - # conditions={"method": ["POST"]}) - #self._connect("/servers/{server_id}", controller=cloud.delete_server, - # conditions={'method': ["DELETE"]}) - #self._connect("/servers", controller=cloud) + self._connect("/v1.0", controller=AuthenticationAPI()) + cloud = CloudServerAPI() + self._connect("/servers", controller=cloud.launch_server, + conditions={"method": ["POST"]}) + self._connect("/servers/{server_id}", controller=cloud.delete_server, + conditions={'method': ["DELETE"]}) + self._connect("/servers", controller=cloud) class AuthenticationAPI(wsgi.Application): From fb382c8e705e1803abb5de77a1fd11e6f913af75 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Wed, 11 Aug 2010 17:40:28 -0400 Subject: [PATCH 061/101] Adapts the run_tests.sh script to allow interactive or automated creation of virtualenv, or to run tests outside of a virtualenv --- run_tests.sh | 63 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 3 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 85d7c88341..31bfce9fac 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,12 +1,69 @@ -#!/bin/bash +#!/bin/bash + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run Nova's test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. 
Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -h, --help Print this usage message" + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." + exit +} + +function process_options { + array=$1 + elements=${#array[@]} + for (( x=0;x<$elements;x++)); do + process_option ${array[${x}]} + done +} + +function process_option { + option=$1 + case $option in + --help) usage;; + -h) usage;; + -V) let always_venv=1; let never_venv=0;; + --virtual-env) let always_venv=1; let never_venv=0;; + -N) let always_venv=0; let never_venv=1;; + --no-virtual-env) let always_venv=0; let never_venv=1;; + esac +} venv=.nova-venv with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +options=("$@") + +process_options $options + +if [ $never_venv -eq 1 ]; then + # Just run the test suites in current environment + python run_tests.py + exit +fi if [ -e ${venv} ]; then ${with_venv} python run_tests.py $@ else - echo "No virtual environment found...creating one" - python tools/install_venv.py + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py + else + echo -e "No virtual environment found...create one? 
(Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py + else + python run_tests.py + exit + fi + fi ${with_venv} python run_tests.py $@ fi From 6664c960e08e31fa8b464b0ccbbf489da271e033 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 11 Aug 2010 15:29:14 -0700 Subject: [PATCH 062/101] fix dhcpbridge issues --- bin/nova-dhcpbridge | 2 +- nova/network/linux_net.py | 8 ++------ nova/network/service.py | 8 ++++---- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index b1ad1c8fef..f70a4482c2 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -69,7 +69,7 @@ def init_leases(interface): """Get the list of hosts for an interface.""" net = model.get_network_by_interface(interface) res = "" - for address in net.address_objs: + for address in net.assigned_objs: res += "%s\n" % linux_net.host_dhcp(address) return res diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4ebc2097b0..15050adaf4 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -116,7 +116,7 @@ def _dnsmasq_cmd(net): ' --pid-file=%s' % dhcp_file(net['vlan'], 'pid'), ' --listen-address=%s' % net.dhcp_listen_address, ' --except-interface=lo', - ' --dhcp-range=%s,static,600s' % net.dhcp_range_start, + ' --dhcp-range=%s,static,120s' % net.dhcp_range_start, ' --dhcp-hostsfile=%s' % dhcp_file(net['vlan'], 'conf'), ' --dhcp-script=%s' % bin_file('nova-dhcpbridge'), ' --leasefile-ro'] @@ -153,14 +153,10 @@ def start_dnsmasq(network): # correct dnsmasq process try: os.kill(pid, signal.SIGHUP) + return except Exception as exc: # pylint: disable=W0703 logging.debug("Hupping dnsmasq threw %s", exc) - # otherwise delete the existing leases file and start dnsmasq - lease_file = dhcp_file(network['vlan'], 'leases') - if os.path.exists(lease_file): - os.unlink(lease_file) - # FLAGFILE and DNSMASQ_INTERFACE in env env = 
{'FLAGFILE': FLAGS.dhcpbridge_flagfile, 'DNSMASQ_INTERFACE': network['bridge_name']} diff --git a/nova/network/service.py b/nova/network/service.py index 9c0f5520bf..625f20dd4f 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -226,13 +226,13 @@ class VlanNetworkService(BaseNetworkService): """Returns an ip to the pool""" return model.get_network_by_address(fixed_ip).deallocate_ip(fixed_ip) - def lease_ip(self, address): + def lease_ip(self, fixed_ip): """Called by bridge when ip is leased""" - return model.get_network_by_address(address).lease_ip(address) + return model.get_network_by_address(fixed_ip).lease_ip(fixed_ip) - def release_ip(self, address): + def release_ip(self, fixed_ip): """Called by bridge when ip is released""" - return model.get_network_by_address(address).release_ip(address) + return model.get_network_by_address(fixed_ip).release_ip(fixed_ip) def restart_nets(self): """Ensure the network for each user is enabled""" From f8fc15c645216483ac20280af0e6e6bb92b6be0b Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 12 Aug 2010 14:30:54 +0200 Subject: [PATCH 063/101] Parameterise libvirt URI. 
--- nova/virt/libvirt_conn.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 551ba6e54b..c3175b6fef 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -43,6 +43,9 @@ libvirt = None libxml2 = None FLAGS = flags.FLAGS +flags.DEFINE_string('libvirt_uri', + 'qemu:///system', + 'Libvirt connection URI') flags.DEFINE_string('libvirt_xml_template', utils.abspath('compute/libvirt.xml.template'), 'Libvirt XML Template') @@ -72,9 +75,9 @@ class LibvirtConnection(object): 'root', None] if read_only: - self._conn = libvirt.openReadOnly('qemu:///system') + self._conn = libvirt.openReadOnly(FLAGS.libvirt_uri) else: - self._conn = libvirt.openAuth('qemu:///system', auth, 0) + self._conn = libvirt.openAuth(FLAGS.libvirt_uri, auth, 0) def list_instances(self): From 0493f4bc5786a4d253e7f73092443117b158071a Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 12 Aug 2010 14:32:11 +0200 Subject: [PATCH 064/101] Move libvirt.xml template into nova/virt --- nova/{compute => virt}/libvirt.xml.template | 0 nova/virt/libvirt_conn.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename nova/{compute => virt}/libvirt.xml.template (100%) diff --git a/nova/compute/libvirt.xml.template b/nova/virt/libvirt.xml.template similarity index 100% rename from nova/compute/libvirt.xml.template rename to nova/virt/libvirt.xml.template diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index c3175b6fef..715c4487d3 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -50,7 +50,7 @@ flags.DEFINE_string('libvirt_xml_template', utils.abspath('compute/libvirt.xml.template'), 'Libvirt XML Template') flags.DEFINE_string('injected_network_template', - utils.abspath('compute/interfaces.template'), + utils.abspath('virt/interfaces.template'), 'Template file for injected network') flags.DEFINE_string('libvirt_type', From 4d7fe5555de3c7e475a436af11559b00d7af5790 
Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 11:52:32 -0700 Subject: [PATCH 065/101] remove syslog-ng workaround --- nova/twistd.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/nova/twistd.py b/nova/twistd.py index c83276daab..8de322aa5d 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -241,15 +241,7 @@ def serve(filename): print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) - class NoNewlineFormatter(logging.Formatter): - """Strips newlines from default formatter""" - def format(self, record): - """Grabs default formatter's output and strips newlines""" - data = logging.Formatter.format(self, record) - return data.replace("\n", "--") - - # NOTE(vish): syslog-ng doesn't handle newlines from trackbacks very well - formatter = NoNewlineFormatter( + formatter = logging.Formatter( '(%(name)s): %(levelname)s %(message)s') handler = logging.StreamHandler(log.StdioOnnaStick()) handler.setFormatter(formatter) From e14d70d7be58ac99f98b66620320c453fa79c8c8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 12:12:38 -0700 Subject: [PATCH 066/101] keep track of leasing state so we can delete ips that didn't ever get leased --- nova/network/model.py | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index ce93450674..49c12e4599 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -155,8 +155,10 @@ class Address(datastore.BasicModel): def identifier(self): return self.address + # NOTE(vish): address states allocated, leased, deallocated def default_state(self): - return {'address': self.address} + return {'address': self.address, + 'state': 'none'} @classmethod # pylint: disable=R0913 @@ -170,6 +172,7 @@ class Address(datastore.BasicModel): hostname = "ip-%s" % address.replace('.', '-') addr['hostname'] = hostname addr['network_id'] = network_id + addr['state'] = 
'allocated' addr.save() return addr @@ -322,7 +325,13 @@ class BaseNetwork(datastore.BasicModel): def lease_ip(self, ip_str): """Called when DHCP lease is activated""" - logging.debug("Leasing allocated IP %s", ip_str) + if not ip_str in self.assigned: + raise exception.AddressNotAllocated() + address = self.get_address(ip_str) + if address: + logging.debug("Leasing allocated IP %s", ip_str) + address['state'] = 'leased' + address.save() def release_ip(self, ip_str): """Called when DHCP lease expires @@ -330,16 +339,23 @@ class BaseNetwork(datastore.BasicModel): Removes the ip from the assigned list""" if not ip_str in self.assigned: raise exception.AddressNotAllocated() + logging.debug("Releasing IP %s", ip_str) self._rem_host(ip_str) self.deexpress(address=ip_str) - logging.debug("Releasing IP %s", ip_str) def deallocate_ip(self, ip_str): """Deallocates an allocated ip""" - # NOTE(vish): Perhaps we should put the ip into an intermediate - # state, so we know that we are pending waiting for - # dnsmasq to confirm that it has been released. - logging.debug("Deallocating allocated IP %s", ip_str) + if not ip_str in self.assigned: + raise exception.AddressNotAllocated() + address = self.get_address(ip_str) + if address: + if address['state'] != 'allocated': + # NOTE(vish): address hasn't been leased, so release it + self.release_ip(ip_str) + else: + logging.debug("Deallocating allocated IP %s", ip_str) + address['state'] == 'deallocated' + address.save() def express(self, address=None): """Set up network. 
Implemented in subclasses""" From 8d4dd0924bfd45b7806e6a29018de45d58ee6339 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 12:44:23 -0700 Subject: [PATCH 067/101] rename address stuff to avoid name collision and make the .all() iterator work again --- nova/endpoint/cloud.py | 4 ++-- nova/network/model.py | 26 +++++++++++++------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index c79e96f5dd..27310577fc 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -311,7 +311,7 @@ class CloudController(object): def _get_address(self, context, public_ip): # FIXME(vish) this should move into network.py - address = network_model.PublicAddress.lookup(public_ip) + address = network_model.ElasticIp.lookup(public_ip) if address and (context.user.is_admin() or address['project_id'] == context.project.id): return address raise exception.NotFound("Address at ip %s not found" % public_ip) @@ -456,7 +456,7 @@ class CloudController(object): def format_addresses(self, context): addresses = [] - for address in network_model.PublicAddress.all(): + for address in network_model.ElasticIp.all(): # TODO(vish): implement a by_project iterator for addresses if (context.user.is_admin() or address['project_id'] == context.project.id): diff --git a/nova/network/model.py b/nova/network/model.py index 49c12e4599..7ae68d8a74 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -143,13 +143,12 @@ class Vlan(datastore.BasicModel): network[start + FLAGS.network_size - 1]) -class Address(datastore.BasicModel): +class FixedIp(datastore.BasicModel): """Represents a fixed ip in the datastore""" - override_type = "address" def __init__(self, address): self.address = address - super(Address, self).__init__() + super(FixedIp, self).__init__() @property def identifier(self): @@ -163,7 +162,7 @@ class Address(datastore.BasicModel): @classmethod # pylint: disable=R0913 def create(cls, 
user_id, project_id, address, mac, hostname, network_id): - """Creates an Address object""" + """Creates an FixedIp object""" addr = cls(address) addr['user_id'] = user_id addr['project_id'] = project_id @@ -178,16 +177,16 @@ class Address(datastore.BasicModel): def save(self): is_new = self.is_new_record() - success = super(Address, self).save() + success = super(FixedIp, self).save() if success and is_new: self.associate_with("network", self['network_id']) def destroy(self): self.unassociate_with("network", self['network_id']) - super(Address, self).destroy() + super(FixedIp, self).destroy() -class PublicAddress(Address): +class ElasticIp(FixedIp): """Represents an elastic ip in the datastore""" override_type = "address" @@ -203,7 +202,7 @@ class PublicAddress(Address): class BaseNetwork(datastore.BasicModel): """Implements basic logic for allocating ips in a network""" override_type = 'network' - address_class = Address + address_class = FixedIp @property def identifier(self): @@ -271,12 +270,12 @@ class BaseNetwork(datastore.BasicModel): # pylint: disable=R0913 def _add_host(self, user_id, project_id, ip_address, mac, hostname): """Add a host to the datastore""" - Address.create(user_id, project_id, ip_address, + self.address_class.create(user_id, project_id, ip_address, mac, hostname, self.identifier) def _rem_host(self, ip_address): """Remove a host from the datastore""" - Address(ip_address).destroy() + self.address_class(ip_address).destroy() @property def assigned(self): @@ -288,6 +287,7 @@ class BaseNetwork(datastore.BasicModel): """Returns a list of all assigned addresses as objects""" return self.address_class.associated_to('network', self.identifier) + @classmethod def get_address(self, ip_address): """Returns a specific ip as an object""" if ip_address in self.assigned: @@ -478,7 +478,7 @@ DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] class PublicNetworkController(BaseNetwork): """Handles elastic ips""" override_type = 
'network' - address_class = PublicAddress + address_class = ElasticIp def __init__(self, *args, **kwargs): network_id = "public:default" @@ -613,7 +613,7 @@ def get_project_network(project_id, security_group='default'): def get_network_by_address(address): """Gets the network for a given private ip""" - address_record = Address.lookup(address) + address_record = FixedIp.lookup(address) if not address_record: raise exception.AddressNotAllocated() return get_project_network(address_record['project_id']) @@ -629,6 +629,6 @@ def get_network_by_interface(iface, security_group='default'): def get_public_ip_for_instance(instance_id): """Gets the public ip for a given instance""" # FIXME(josh): this should be a lookup - iteration won't scale - for address_record in PublicAddress.all(): + for address_record in ElasticIp.all(): if address_record.get('instance_id', 'available') == instance_id: return address_record['address'] From 773390a4daa633b8a54b4fc29600182b6bfb915d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 13:33:22 -0700 Subject: [PATCH 068/101] typo allocated should be relased --- nova/endpoint/cloud.py | 4 ++-- nova/network/model.py | 28 ++++++++++++++-------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index c79e96f5dd..1b07f2adb3 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -311,7 +311,7 @@ class CloudController(object): def _get_address(self, context, public_ip): # FIXME(vish) this should move into network.py - address = network_model.PublicAddress.lookup(public_ip) + address = network_model.PublicNetworkController.get_address(public_ip) if address and (context.user.is_admin() or address['project_id'] == context.project.id): return address raise exception.NotFound("Address at ip %s not found" % public_ip) @@ -456,7 +456,7 @@ class CloudController(object): def format_addresses(self, context): addresses = [] - for address in 
network_model.PublicAddress.all(): + for address in network_model.PublicNetworkController.assigned_objs(): # TODO(vish): implement a by_project iterator for addresses if (context.user.is_admin() or address['project_id'] == context.project.id): diff --git a/nova/network/model.py b/nova/network/model.py index 49c12e4599..e536936939 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -143,13 +143,12 @@ class Vlan(datastore.BasicModel): network[start + FLAGS.network_size - 1]) -class Address(datastore.BasicModel): +class FixedIp(datastore.BasicModel): """Represents a fixed ip in the datastore""" - override_type = "address" def __init__(self, address): self.address = address - super(Address, self).__init__() + super(FixedIp, self).__init__() @property def identifier(self): @@ -163,7 +162,7 @@ class Address(datastore.BasicModel): @classmethod # pylint: disable=R0913 def create(cls, user_id, project_id, address, mac, hostname, network_id): - """Creates an Address object""" + """Creates an FixedIp object""" addr = cls(address) addr['user_id'] = user_id addr['project_id'] = project_id @@ -178,16 +177,16 @@ class Address(datastore.BasicModel): def save(self): is_new = self.is_new_record() - success = super(Address, self).save() + success = super(FixedIp, self).save() if success and is_new: self.associate_with("network", self['network_id']) def destroy(self): self.unassociate_with("network", self['network_id']) - super(Address, self).destroy() + super(FixedIp, self).destroy() -class PublicAddress(Address): +class ElasticIp(FixedIp): """Represents an elastic ip in the datastore""" override_type = "address" @@ -203,7 +202,7 @@ class PublicAddress(Address): class BaseNetwork(datastore.BasicModel): """Implements basic logic for allocating ips in a network""" override_type = 'network' - address_class = Address + address_class = FixedIp @property def identifier(self): @@ -271,12 +270,12 @@ class BaseNetwork(datastore.BasicModel): # pylint: disable=R0913 def 
_add_host(self, user_id, project_id, ip_address, mac, hostname): """Add a host to the datastore""" - Address.create(user_id, project_id, ip_address, + self.address_class.create(user_id, project_id, ip_address, mac, hostname, self.identifier) def _rem_host(self, ip_address): """Remove a host from the datastore""" - Address(ip_address).destroy() + self.address_class(ip_address).destroy() @property def assigned(self): @@ -288,6 +287,7 @@ class BaseNetwork(datastore.BasicModel): """Returns a list of all assigned addresses as objects""" return self.address_class.associated_to('network', self.identifier) + @classmethod def get_address(self, ip_address): """Returns a specific ip as an object""" if ip_address in self.assigned: @@ -349,7 +349,7 @@ class BaseNetwork(datastore.BasicModel): raise exception.AddressNotAllocated() address = self.get_address(ip_str) if address: - if address['state'] != 'allocated': + if address['state'] != 'leased': # NOTE(vish): address hasn't been leased, so release it self.release_ip(ip_str) else: @@ -478,7 +478,7 @@ DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)] class PublicNetworkController(BaseNetwork): """Handles elastic ips""" override_type = 'network' - address_class = PublicAddress + address_class = ElasticIp def __init__(self, *args, **kwargs): network_id = "public:default" @@ -613,7 +613,7 @@ def get_project_network(project_id, security_group='default'): def get_network_by_address(address): """Gets the network for a given private ip""" - address_record = Address.lookup(address) + address_record = FixedIp.lookup(address) if not address_record: raise exception.AddressNotAllocated() return get_project_network(address_record['project_id']) @@ -629,6 +629,6 @@ def get_network_by_interface(iface, security_group='default'): def get_public_ip_for_instance(instance_id): """Gets the public ip for a given instance""" # FIXME(josh): this should be a lookup - iteration won't scale - for address_record in 
PublicAddress.all(): + for address_record in ElasticIp.all(): if address_record.get('instance_id', 'available') == instance_id: return address_record['address'] From ef48a727d1c6b824170995fffa59949960ea5d11 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 13:36:10 -0700 Subject: [PATCH 069/101] remove class method --- nova/network/model.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/network/model.py b/nova/network/model.py index e536936939..1a958b564f 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -287,7 +287,6 @@ class BaseNetwork(datastore.BasicModel): """Returns a list of all assigned addresses as objects""" return self.address_class.associated_to('network', self.identifier) - @classmethod def get_address(self, ip_address): """Returns a specific ip as an object""" if ip_address in self.assigned: From a96b4c1470ee4e73382178206d8728d2a2ba89cf Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 14:18:59 -0700 Subject: [PATCH 070/101] renamed missed reference to Address --- nova/endpoint/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 27310577fc..0a15e934cb 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -126,7 +126,7 @@ class CloudController(object): else: keys = '' - address_record = network_model.Address(i['private_dns_name']) + address_record = network_model.FixedIp(i['private_dns_name']) if address_record: hostname = address_record['hostname'] else: From 6eba59be8ef6ea47e1d9657fed72fafbc7c9d6ef Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 12 Aug 2010 23:41:32 +0200 Subject: [PATCH 071/101] Make --libvirt_type=uml do the right thing: Sets the correct libvirt URI and use a special template for the XML. 
--- ...xml.template => libvirt.qemu.xml.template} | 0 nova/virt/libvirt.uml.xml.template | 25 ++++++++++++++ nova/virt/libvirt_conn.py | 34 ++++++++++++------- 3 files changed, 47 insertions(+), 12 deletions(-) rename nova/virt/{libvirt.xml.template => libvirt.qemu.xml.template} (100%) create mode 100644 nova/virt/libvirt.uml.xml.template diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.qemu.xml.template similarity index 100% rename from nova/virt/libvirt.xml.template rename to nova/virt/libvirt.qemu.xml.template diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template new file mode 100644 index 0000000000..0bc1507dec --- /dev/null +++ b/nova/virt/libvirt.uml.xml.template @@ -0,0 +1,25 @@ + + %(name)s + %(memory_kb)s + + %(type)suml + /usr/bin/linux + /dev/ubda1 + + + + + + + + + + + + + + + + + %(nova)s + diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 2a818b40d2..e2cdaaf7d8 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -43,19 +43,21 @@ libvirt = None libxml2 = None FLAGS = flags.FLAGS -flags.DEFINE_string('libvirt_uri', - 'qemu:///system', - 'Libvirt connection URI') flags.DEFINE_string('libvirt_xml_template', - utils.abspath('compute/libvirt.xml.template'), - 'Libvirt XML Template') + utils.abspath('compute/libvirt.qemu.xml.template'), + 'Libvirt XML Template for QEmu/KVM') +flags.DEFINE_string('libvirt_uml_xml_template', + utils.abspath('compute/libvirt.uml.xml.template'), + 'Libvirt XML Template for user-mode-linux') flags.DEFINE_string('injected_network_template', utils.abspath('virt/interfaces.template'), 'Template file for injected network') - flags.DEFINE_string('libvirt_type', 'kvm', - 'Libvirt domain type (kvm, qemu, etc)') + 'Libvirt domain type (valid options are: kvm, qemu, uml)') +flags.DEFINE_string('libvirt_uri', + '', + 'Override the default libvirt URI (which is dependent on libvirt_type)') def get_connection(read_only): # These are loaded late so that 
there's no need to install these @@ -74,10 +76,19 @@ class LibvirtConnection(object): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', None] - if read_only: - self._conn = libvirt.openReadOnly(FLAGS.libvirt_uri) + + if FLAGS.libvirt_type == 'uml': + uri = FLAGS.libvirt_uri or 'uml:///system' + template_file = FLAGS.libvirt_uml_xml_template else: - self._conn = libvirt.openAuth(FLAGS.libvirt_uri, auth, 0) + uri = FLAGS.libvirt_uri or 'qemu:///system' + template_file = FLAGS.libvirt_xml_template + self.libvirt_xml = open(template_file).read() + + if read_only: + self._conn = libvirt.openReadOnly(uri) + else: + self._conn = libvirt.openAuth(uri, auth, 0) def list_instances(self): @@ -240,14 +251,13 @@ class LibvirtConnection(object): def toXml(self, instance): # TODO(termie): cache? logging.debug("Starting the toXML method") - libvirt_xml = open(FLAGS.libvirt_xml_template).read() xml_info = instance.datamodel.copy() # TODO(joshua): Make this xml express the attached disks as well # TODO(termie): lazy lazy hack because xml is annoying xml_info['nova'] = json.dumps(instance.datamodel.copy()) xml_info['type'] = FLAGS.libvirt_type - libvirt_xml = libvirt_xml % xml_info + libvirt_xml = self.libvirt_xml % xml_info logging.debug("Finished the toXML method") return libvirt_xml From 11c47dd12adcbf2a5011510f01081db858b057db Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:36:46 -0400 Subject: [PATCH 072/101] Mergeprop cleanup --- nova/endpoint/rackspace/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 nova/endpoint/rackspace/__init__.py diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 From 39d12bf518e284183d1debd52fe7081ecf1c633d Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:36:56 -0400 Subject: [PATCH 073/101] Mergeprop cleanup --- nova/endpoint/rackspace/{rackspace.py => 
__init__.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename nova/endpoint/rackspace/{rackspace.py => __init__.py} (100%) diff --git a/nova/endpoint/rackspace/rackspace.py b/nova/endpoint/rackspace/__init__.py similarity index 100% rename from nova/endpoint/rackspace/rackspace.py rename to nova/endpoint/rackspace/__init__.py From 4391b7362eeab2cd976309696be1209ac771ce24 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:41:31 -0400 Subject: [PATCH 074/101] Undo the changes to cloud.py that somehow diverged from trunk --- nova/endpoint/cloud.py | 105 ++++++++++++++++++++--------------------- 1 file changed, 52 insertions(+), 53 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 878d54a15a..ad9188ff3d 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -47,6 +47,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + def _gen_key(user_id, key_name): """ Tuck this into AuthManager """ try: @@ -102,15 +103,16 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], + INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: result[instance['key_name']] = [line] return result - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) + def get_metadata(self, ipaddress): + i = self.get_instance_by_ip(ipaddress) if i is None: return None mpi = self._get_mpi_data(i['project_id']) @@ -147,7 +149,7 @@ class CloudController(object): }, 'public-hostname': i.get('dns_name', ''), 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, + 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), 'reservation-id': 
i['reservation_id'], 'security-groups': i.get('groups', ''), @@ -203,26 +205,22 @@ class CloudController(object): 'keyFingerprint': key_pair.fingerprint, }) - return { 'keypairsSet': result } + return {'keypairsSet': result} @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): - try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - d.errback(kwargs['exception']) - return - d.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return d - - except manager.UserError as e: - raise + dcall = defer.Deferred() + pool = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + dcall.errback(kwargs['exception']) + return + dcall.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + pool.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return dcall @rbac.allow('all') def delete_key_pair(self, context, key_name, **kwargs): @@ -232,7 +230,7 @@ class CloudController(object): @rbac.allow('all') def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } + groups = {'securityGroupSet': []} # Stubbed for now to unblock other things. 
return groups @@ -251,7 +249,7 @@ class CloudController(object): instance = self._get_instance(context, instance_id[0]) return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) + "args": {"instance_id": instance_id[0]}}) def _get_user_id(self, context): if context and context.user: @@ -285,10 +283,10 @@ class CloudController(object): if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] + 'device': volume['mountpoint'], + 'instanceId': volume['instance_id'], + 'status': 'attached', + 'volume_id': volume['volume_id']}] else: v['attachmentSet'] = [{}] return v @@ -298,7 +296,7 @@ class CloudController(object): def create_volume(self, context, size, **kwargs): # TODO(vish): refactor this to create the volume object here and tell service to create it result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, + "args": {"size": size, "user_id": context.user.id, "project_id": context.project.id}}) # NOTE(vish): rpc returned value is in the result key in the dictionary @@ -348,15 +346,15 @@ class CloudController(object): compute_node = instance['node_name'] rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + return defer.succeed({'attachTime': volume['attach_time'], 
+ 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) @rbac.allow('projectmanager', 'sysadmin') @@ -372,18 +370,18 @@ class CloudController(object): instance = self._get_instance(context, instance_id) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "detach_volume", - "args" : {"instance_id": instance_id, + "args": {"instance_id": instance_id, "volume_id": volume_id}}) except exception.NotFound: # If the instance doesn't exist anymore, # then we need to call detach blind volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + return defer.succeed({'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -425,7 +423,8 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) + instance.get('project_id', None), + instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') i['instance_type'] = instance.get('instance_type', None) @@ -442,7 +441,7 @@ class CloudController(object): reservations[res_id] = r reservations[res_id]['instances_set'].append(i) - instance_response = {'reservationSet' : list(reservations.values()) } + instance_response = {'reservationSet': list(reservations.values())} return instance_response @rbac.allow('all') @@ -457,7 +456,7 @@ class CloudController(object): address['project_id'] 
== context.project.id): address_rv = { 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') + 'instance_id': address.get('instance_id', 'free') } if context.user.is_admin(): address_rv['instance_id'] = "%s (%s, %s)" % ( @@ -477,7 +476,7 @@ class CloudController(object): "args": {"user_id": context.user.id, "project_id": context.project.id}}) public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @defer.inlineCallbacks @@ -591,7 +590,7 @@ class CloudController(object): inst.save() rpc.cast(FLAGS.compute_topic, {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) + "args": {"instance_id": inst.instance_id}}) logging.debug("Casting to node for %s's instance with IP of %s" % (context.user.name, inst['private_dns_name'])) # TODO: Make Network figure out the network name from ip. @@ -646,7 +645,7 @@ class CloudController(object): instance = self._get_instance(context, i) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "reboot_instance", - "args" : {"instance_id": i}}) + "args": {"instance_id": i}}) return defer.succeed(True) @rbac.allow('projectmanager', 'sysadmin') @@ -656,7 +655,7 @@ class CloudController(object): volume_node = volume['node_name'] rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) + "args": {"volume_id": volume_id}}) return defer.succeed(True) @rbac.allow('all') @@ -689,9 +688,9 @@ class CloudController(object): image = images.list(context, image_id)[0] except IndexError: raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } + result = {'image_id': image_id, 'launchPermission': []} if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) + result['launchPermission'].append({'group': 'all'}) 
return defer.succeed(result) @rbac.allow('projectmanager', 'sysadmin') From a679cab031ec91dd719b9ba887cdae4f595b2ca4 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 12 Aug 2010 21:27:53 -0700 Subject: [PATCH 075/101] make rpc.call propogate exception info. Includes tests --- nova/endpoint/cloud.py | 15 ++++----- nova/rpc.py | 38 +++++++++++++++++------ nova/tests/rpc_unittest.py | 62 ++++++++++++++++++++++++++++++++++++++ run_tests.py | 1 + 4 files changed, 98 insertions(+), 18 deletions(-) create mode 100644 nova/tests/rpc_unittest.py diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index ad9188ff3d..c32fb1f7fb 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -103,7 +103,7 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], + line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) @@ -300,7 +300,7 @@ class CloudController(object): "user_id": context.user.id, "project_id": context.project.id}}) # NOTE(vish): rpc returned value is in the result key in the dictionary - volume = self._get_volume(context, result['result']) + volume = self._get_volume(context, result) defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) def _get_address(self, context, public_ip): @@ -423,7 +423,7 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), + instance.get('project_id', None), instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') @@ -471,11 +471,10 @@ class CloudController(object): @defer.inlineCallbacks def allocate_address(self, context, **kwargs): 
network_topic = yield self._get_network_topic(context) - alloc_result = yield rpc.call(network_topic, + public_ip = yield rpc.call(network_topic, {"method": "allocate_elastic_ip", "args": {"user_id": context.user.id, "project_id": context.project.id}}) - public_ip = alloc_result['result'] defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @@ -516,11 +515,10 @@ class CloudController(object): """Retrieves the network host for a project""" host = network_service.get_host_for_project(context.project.id) if not host: - result = yield rpc.call(FLAGS.network_topic, + host = yield rpc.call(FLAGS.network_topic, {"method": "set_network_host", "args": {"user_id": context.user.id, "project_id": context.project.id}}) - host = result['result'] defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) @rbac.allow('projectmanager', 'sysadmin') @@ -563,13 +561,12 @@ class CloudController(object): vpn = False if image_id == FLAGS.vpn_image_id: vpn = True - allocate_result = yield rpc.call(network_topic, + allocate_data = yield rpc.call(network_topic, {"method": "allocate_fixed_ip", "args": {"user_id": context.user.id, "project_id": context.project.id, "security_group": security_group, "vpn": vpn}}) - allocate_data = allocate_result['result'] inst = self.instdir.new() inst['image_id'] = image_id inst['kernel_id'] = kernel_id diff --git a/nova/rpc.py b/nova/rpc.py index 2a550c3ae2..e06a3e19bc 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -40,7 +40,7 @@ FLAGS = flags.FLAGS _log = logging.getLogger('amqplib') -_log.setLevel(logging.WARN) +_log.setLevel(logging.DEBUG) class Connection(connection.BrokerConnection): @@ -141,8 +141,8 @@ class AdapterConsumer(TopicConsumer): node_args = dict((str(k), v) for k, v in args.iteritems()) d = defer.maybeDeferred(node_func, **node_args) if msg_id: - d.addCallback(lambda rval: msg_reply(msg_id, rval)) - d.addErrback(lambda e: msg_reply(msg_id, str(e))) + d.addCallback(lambda rval: msg_reply(msg_id, rval, None)) + 
d.addErrback(lambda e: msg_reply(msg_id, None, e)) return @@ -174,20 +174,37 @@ class DirectPublisher(Publisher): super(DirectPublisher, self).__init__(connection=connection) -def msg_reply(msg_id, reply): +def msg_reply(msg_id, reply=None, failure=None): + if failure: + message = failure.getErrorMessage() + traceback = failure.getTraceback() + logging.error("Returning exception %s to caller", message) + logging.error(traceback) + failure = (failure.type.__name__, str(failure.value), traceback) conn = Connection.instance() publisher = DirectPublisher(connection=conn, msg_id=msg_id) - try: - publisher.send({'result': reply}) - except TypeError: + publisher.send({'result': reply, 'failure': failure}) + except Exception, exc: publisher.send( {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()) + for k, v in reply.__dict__.iteritems()), + 'failure': failure }) publisher.close() +class RemoteError(exception.Error): + """signifies that a remote class has raised an exception""" + def __init__(self, type, value, traceback): + self.type = type + self.value = value + self.traceback = traceback + super(RemoteError, self).__init__("%s %s\n%s" % (type, + value, + traceback)) + + def call(topic, msg): _log.debug("Making asynchronous call...") msg_id = uuid.uuid4().hex @@ -199,7 +216,10 @@ def call(topic, msg): consumer = DirectConsumer(connection=conn, msg_id=msg_id) def deferred_receive(data, message): message.ack() - d.callback(data) + if data['failure']: + return d.errback(RemoteError(*data['failure'])) + else: + return d.callback(data['result']) consumer.register_callback(deferred_receive) injected = consumer.attach_to_tornado() diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py new file mode 100644 index 0000000000..9c2e29344a --- /dev/null +++ b/nova/tests/rpc_unittest.py @@ -0,0 +1,62 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National 
Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging + +from twisted.internet import defer + +from nova import flags +from nova import rpc +from nova import test + + +FLAGS = flags.FLAGS + + +class RpcTestCase(test.BaseTestCase): + def setUp(self): + super(RpcTestCase, self).setUp() + self.conn = rpc.Connection.instance() + self.receiver = TestReceiver() + self.consumer = rpc.AdapterConsumer(connection=self.conn, + topic='test', + proxy=self.receiver) + + self.injected.append(self.consumer.attach_to_tornado(self.ioloop)) + + def test_call_succeed(self): + value = 42 + result = yield rpc.call('test', {"method": "echo", "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_exception(self): + value = 42 + self.assertFailure(rpc.call('test', {"method": "fail", "args": {"value": value}}), rpc.RemoteError) + try: + yield rpc.call('test', {"method": "fail", "args": {"value": value}}) + self.fail("should have thrown rpc.RemoteError") + except rpc.RemoteError as exc: + self.assertEqual(int(exc.value), value) + +class TestReceiver(object): + def echo(self, value): + logging.debug("Received %s", value) + return defer.succeed(value) + + def fail(self, value): + raise Exception(value) diff --git a/run_tests.py b/run_tests.py index 7fe6e73ec6..d90ac81750 100644 --- a/run_tests.py +++ b/run_tests.py @@ -59,6 +59,7 @@ from nova.tests.model_unittest import * from 
nova.tests.network_unittest import * from nova.tests.objectstore_unittest import * from nova.tests.process_unittest import * +from nova.tests.rpc_unittest import * from nova.tests.validator_unittest import * from nova.tests.volume_unittest import * From ea2805d372a0d4a480667058e96288bf15844828 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 13 Aug 2010 11:51:33 +0100 Subject: [PATCH 076/101] Added documentation to the nova.virt interface. --- nova/virt/connection.py | 9 +++ nova/virt/fake.py | 129 +++++++++++++++++++++++++++++++++++++- nova/virt/libvirt_conn.py | 20 ------ 3 files changed, 137 insertions(+), 21 deletions(-) diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 004adb19db..90bc7fa0a8 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -27,6 +27,15 @@ FLAGS = flags.FLAGS def get_connection(read_only=False): + """Returns an object representing the connection to a virtualization + platform. This could be nova.virt.fake.FakeConnection in test mode, + a connection to KVM or QEMU via libvirt, or a connection to XenServer + or Xen Cloud Platform via XenAPI. + + Any object returned here must conform to the interface documented by + FakeConnection. + """ + # TODO(termie): maybe lazy load after initial check for permissions # TODO(termie): check whether we can be disconnected t = FLAGS.connection_type diff --git a/nova/virt/fake.py b/nova/virt/fake.py index d9ae5ac961..1058371816 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -19,6 +19,7 @@ """ A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor. +This module also documents the semantics of real hypervisor connections. """ import logging @@ -32,6 +33,38 @@ def get_connection(_): class FakeConnection(object): + """ + The interface to this class talks in terms of 'instances' (Amazon EC2 and + internal Nova terminology), by which we mean 'running virtual machine' + (XenAPI terminology) or domain (Xen or libvirt terminology). 
+ + An instance has an ID, which is the identifier chosen by Nova to represent + the instance further up the stack. This is unfortunately also called a + 'name' elsewhere. As far as this layer is concerned, 'instance ID' and + 'instance name' are synonyms. + + Note that the instance ID or name is not human-readable or + customer-controlled -- it's an internal ID chosen by Nova. At the + nova.virt layer, instances do not have human-readable names at all -- such + things are only known higher up the stack. + + Most virtualization platforms will also have their own identity schemes, + to uniquely identify a VM or domain. These IDs must stay internal to the + platform-specific layer, and never escape the connection interface. The + platform-specific layer is responsible for keeping track of which instance + ID maps to which platform-specific ID, and vice versa. + + In contrast, the list_disks and list_interfaces calls may return + platform-specific IDs. These identify a specific virtual disk or specific + virtual network interface, and these IDs are opaque to the rest of Nova. + + Some methods here take an instance of nova.compute.service.Instance. This + is the datastructure used by nova.compute to store details regarding an + instance, and pass them into this layer. This layer is responsible for + translating that generic datastructure into terms that are specific to the + virtualization platform. + """ + def __init__(self): self.instances = {} @@ -42,20 +75,59 @@ class FakeConnection(object): return cls._instance def list_instances(self): + """ + Return the names of all the instances known to the virtualization + layer, as a list. + """ return self.instances.keys() def spawn(self, instance): + """ + Create a new instance/VM/domain on the virtualization platform. + + The given parameter is an instance of nova.compute.service.Instance. + This function should use the data there to guide the creation of + the new instance. 
+ + Once this function successfully completes, the instance should be + running (power_state.RUNNING). + + If this function fails, any partial instance should be completely + cleaned up, and the virtualization platform should be in the state + that it was before this call began. + """ + fake_instance = FakeInstance() self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING def reboot(self, instance): + """ + Reboot the specified instance. + + The given parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. + """ pass - + def destroy(self, instance): + """ + Destroy (shutdown and delete) the specified instance. + + The given parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. + """ del self.instances[instance.name] def get_info(self, instance_id): + """ + Get a block of information about the given instance. This is returned + as a dictionary containing 'state': The power_state of the instance, + 'max_mem': The maximum memory for the instance, in KiB, 'mem': The + current memory the instance has, in KiB, 'num_cpu': The current number + of virtual CPUs the instance has, 'cpu_time': The total CPU time used + by the instance, in nanoseconds. + """ i = self.instances[instance_id] return {'state': i._state, 'max_mem': 0, @@ -64,15 +136,70 @@ class FakeConnection(object): 'cpu_time': 0} def list_disks(self, instance_id): + """ + Return the IDs of all the virtual disks attached to the specified + instance, as a list. These IDs are opaque to the caller (they are + only useful for giving back to this layer as a parameter to + disk_stats). These IDs only need to be unique for a given instance. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. 
+ """ return ['A_DISK'] def list_interfaces(self, instance_id): + """ + Return the IDs of all the virtual network interfaces attached to the + specified instance, as a list. These IDs are opaque to the caller + (they are only useful for giving back to this layer as a parameter to + interface_stats). These IDs only need to be unique for a given + instance. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. + """ return ['A_VIF'] def block_stats(self, instance_id, disk_id): + """ + Return performance counters associated with the given disk_id on the + given instance_id. These are returned as [rd_req, rd_bytes, wr_req, + wr_bytes, errs], where rd indicates read, wr indicates write, req is + the total number of I/O requests made, bytes is the total number of + bytes transferred, and errs is the number of requests held up due to a + full pipeline. + + All counters are long integers. + + This method is optional. On some platforms (e.g. XenAPI) performance + statistics can be retrieved directly in aggregate form, without Nova + having to do the aggregation. On those platforms, this method is + unused. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. + """ return [0L, 0L, 0L, 0L, null] def interface_stats(self, instance_id, iface_id): + """ + Return performance counters associated with the given iface_id on the + given instance_id. These are returned as [rx_bytes, rx_packets, + rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx + indicates receive, tx indicates transmit, bytes and packets indicate + the total number of bytes or packets transferred, and errs and dropped + is the total number of packets failed / dropped. + + All counters are long integers. + + This method is optional. On some platforms (e.g. 
XenAPI) performance + statistics can be retrieved directly in aggregate form, without Nova + having to do the aggregation. On those platforms, this method is + unused. + + Note that this function takes an instance ID, not a + compute.service.Instance, so that it can be called by compute.monitor. + """ return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L] diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 13305be0f5..d031a10d8f 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -261,12 +261,6 @@ class LibvirtConnection(object): def get_disks(self, instance_id): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. - - Returns a list of all block devices for this domain. - """ domain = self._conn.lookupByName(instance_id) # TODO(devcamcar): Replace libxml2 with etree. xml = domain.XMLDesc(0) @@ -304,12 +298,6 @@ class LibvirtConnection(object): def get_interfaces(self, instance_id): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. - - Returns a list of all network interfaces for this instance. - """ domain = self._conn.lookupByName(instance_id) # TODO(devcamcar): Replace libxml2 with etree. xml = domain.XMLDesc(0) @@ -347,18 +335,10 @@ class LibvirtConnection(object): def block_stats(self, instance_id, disk): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. - """ domain = self._conn.lookupByName(instance_id) return domain.blockStats(disk) def interface_stats(self, instance_id, interface): - """ - Note that this function takes an instance ID, not an Instance, so - that it can be called by monitor. 
- """ domain = self._conn.lookupByName(instance_id) return domain.interfaceStats(interface) From 4c39eca0c90fc798e9980b8fe750d66208fecae5 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 13 Aug 2010 14:33:07 +0100 Subject: [PATCH 077/101] Added note regarding dependency upon XenAPI.py. --- doc/source/getting.started.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/getting.started.rst b/doc/source/getting.started.rst index 3eadd08827..f683bb256a 100644 --- a/doc/source/getting.started.rst +++ b/doc/source/getting.started.rst @@ -40,6 +40,7 @@ Python libraries we don't vendor * M2Crypto: python library interface for openssl * curl +* XenAPI: Needed only for Xen Cloud Platform or XenServer support. Available from http://wiki.xensource.com/xenwiki/XCP_SDK or http://community.citrix.com/cdn/xs/sdks. Vendored python libaries (don't require any installation) From 3d15adb40c5fc569bd29d4779fca792263338e54 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Fri, 13 Aug 2010 10:14:34 -0400 Subject: [PATCH 078/101] Merge case statement options --- run_tests.sh | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 31bfce9fac..6ea40d95ed 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -25,12 +25,9 @@ function process_options { function process_option { option=$1 case $option in - --help) usage;; - -h) usage;; - -V) let always_venv=1; let never_venv=0;; - --virtual-env) let always_venv=1; let never_venv=0;; - -N) let always_venv=0; let never_venv=1;; - --no-virtual-env) let always_venv=0; let never_venv=1;; + -h|--help) usage;; + -V|--virtual-env) let always_venv=1; let never_venv=0;; + -N|--no-virtual-env) let always_venv=0; let never_venv=1;; esac } From bfb906cb0235a6e0b037d387aadc4abc2280fea0 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Fri, 13 Aug 2010 11:09:27 -0400 Subject: [PATCH 079/101] Support JSON and XML in Serializer --- nova/wsgi.py | 54 
+++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 45 insertions(+), 9 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 304f7149a7..0570e18298 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -218,23 +218,59 @@ class Serializer(object): Serializes a dictionary to a Content Type specified by a WSGI environment. """ - def __init__(self, environ): - """Create a serializer based on the given WSGI environment.""" + def __init__(self, environ, metadata=None): + """ + Create a serializer based on the given WSGI environment. + 'metadata' is an optional dict mapping MIME types to information + needed to serialize a dictionary to that type. + """ self.environ = environ + self.metadata = metadata or {} - def serialize(self, data): + def to_content_type(self, data): """ Serialize a dictionary into a string. The format of the string will be decided based on the Content Type requested in self.environ: by Accept: header, or by URL suffix. """ - req = webob.Request(self.environ) - # TODO(gundlach): do XML correctly and be more robust - if req.accept and 'application/json' in req.accept: + mimetype = 'application/xml' + # TODO(gundlach): determine mimetype from request + + if mimetype == 'application/json': import json return json.dumps(data) + elif mimetype == 'application/xml': + metadata = self.metadata.get('application/xml', {}) + # We expect data to contain a single key which is the XML root. 
+ root_key = data.keys()[0] + from xml.dom import minidom + doc = minidom.Document() + node = self._to_xml_node(doc, metadata, root_key, data[root_key]) + return node.toprettyxml(indent=' ') else: - return '' + repr(data) + \ - '' - + return repr(data) + def _to_xml_node(self, doc, metadata, nodename, data): + result = doc.createElement(nodename) + if type(data) is list: + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + elif type(data) is dict: + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k,v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: # atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result From 8bdc9ec6f90341ed1a3890af283addc7c0a053c9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 13 Aug 2010 12:51:38 -0700 Subject: [PATCH 080/101] pep8 and pylint cleanup --- nova/rpc.py | 123 +++++++++++++++++++++++++++---------- nova/tests/rpc_unittest.py | 37 ++++++++--- 2 files changed, 122 insertions(+), 38 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index e06a3e19bc..4ac546c2a6 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -21,14 +21,13 @@ AMQP-based RPC. Queues have consumers and publishers. No fan-out support yet. 
""" -from carrot import connection +from carrot import connection as carrot_connection from carrot import messaging import json import logging import sys import uuid from twisted.internet import defer -from twisted.internet import reactor from twisted.internet import task from nova import exception @@ -39,13 +38,15 @@ from nova import flags FLAGS = flags.FLAGS -_log = logging.getLogger('amqplib') -_log.setLevel(logging.DEBUG) +LOG = logging.getLogger('amqplib') +LOG.setLevel(logging.DEBUG) -class Connection(connection.BrokerConnection): +class Connection(carrot_connection.BrokerConnection): + """Connection instance object""" @classmethod def instance(cls): + """Returns the instance""" if not hasattr(cls, '_instance'): params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, @@ -56,18 +57,33 @@ class Connection(connection.BrokerConnection): if FLAGS.fake_rabbit: params['backend_cls'] = fakerabbit.Backend + # NOTE(vish): magic is fun! + # pylint: disable=W0142 cls._instance = cls(**params) return cls._instance @classmethod def recreate(cls): + """Recreates the connection instance + + This is necessary to recover from some network errors/disconnects""" del cls._instance return cls.instance() + class Consumer(messaging.Consumer): + """Consumer base class + + Contains methods for connecting the fetch method to async loops + """ + def __init__(self, *args, **kwargs): + self.failed_connection = False + super(Consumer, self).__init__(*args, **kwargs) + # TODO(termie): it would be nice to give these some way of automatically # cleaning up after themselves def attach_to_tornado(self, io_inst=None): + """Attach a callback to tornado that fires 10 times a second""" from tornado import ioloop if io_inst is None: io_inst = ioloop.IOLoop.instance() @@ -79,33 +95,44 @@ class Consumer(messaging.Consumer): attachToTornado = attach_to_tornado - def fetch(self, *args, **kwargs): + def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + """Wraps the parent fetch 
with some logic for failed connections""" # TODO(vish): the logic for failed connections and logging should be # refactored into some sort of connection manager object try: - if getattr(self, 'failed_connection', False): - # attempt to reconnect + if self.failed_connection: + # NOTE(vish): conn is defined in the parent class, we can + # recreate it as long as we create the backend too + # pylint: disable=W0201 self.conn = Connection.recreate() self.backend = self.conn.create_backend() - super(Consumer, self).fetch(*args, **kwargs) - if getattr(self, 'failed_connection', False): + super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) + if self.failed_connection: logging.error("Reconnected to queue") self.failed_connection = False - except Exception, ex: - if not getattr(self, 'failed_connection', False): + # NOTE(vish): This is catching all errors because we really don't + # exceptions to be logged 10 times a second if some + # persistent failure occurs. + except Exception: # pylint: disable=W0703 + if not self.failed_connection: logging.exception("Failed to fetch message from queue") self.failed_connection = True def attach_to_twisted(self): + """Attach a callback to twisted that fires 10 times a second""" loop = task.LoopingCall(self.fetch, enable_callbacks=True) loop.start(interval=0.1) + class Publisher(messaging.Publisher): + """Publisher base class""" pass class TopicConsumer(Consumer): + """Consumes messages on a specific topic""" exchange_type = "topic" + def __init__(self, connection=None, topic="broadcast"): self.queue = topic self.routing_key = topic @@ -115,14 +142,24 @@ class TopicConsumer(Consumer): class AdapterConsumer(TopicConsumer): + """Calls methods on a proxy object based on method and args""" def __init__(self, connection=None, topic="broadcast", proxy=None): - _log.debug('Initing the Adapter Consumer for %s' % (topic)) + LOG.debug('Initing the Adapter Consumer for %s' % (topic)) self.proxy = proxy - super(AdapterConsumer, 
self).__init__(connection=connection, topic=topic) + super(AdapterConsumer, self).__init__(connection=connection, + topic=topic) @exception.wrap_exception def receive(self, message_data, message): - _log.debug('received %s' % (message_data)) + """Magically looks for a method on the proxy object and calls it + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + """ + LOG.debug('received %s' % (message_data)) msg_id = message_data.pop('_msg_id', None) method = message_data.get('method') @@ -133,12 +170,14 @@ class AdapterConsumer(TopicConsumer): # messages stay in the queue indefinitely, so for now # we just log the message and send an error string # back to the caller - _log.warn('no method for message: %s' % (message_data)) + LOG.warn('no method for message: %s' % (message_data)) msg_reply(msg_id, 'No method for message: %s' % message_data) return node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) + # NOTE(vish): magic is fun! 
+ # pylint: disable=W0142 d = defer.maybeDeferred(node_func, **node_args) if msg_id: d.addCallback(lambda rval: msg_reply(msg_id, rval, None)) @@ -147,7 +186,9 @@ class AdapterConsumer(TopicConsumer): class TopicPublisher(Publisher): + """Publishes messages on a specific topic""" exchange_type = "topic" + def __init__(self, connection=None, topic="broadcast"): self.routing_key = topic self.exchange = FLAGS.control_exchange @@ -156,7 +197,9 @@ class TopicPublisher(Publisher): class DirectConsumer(Consumer): + """Consumes messages directly on a channel specified by msg_id""" exchange_type = "direct" + def __init__(self, connection=None, msg_id=None): self.queue = msg_id self.routing_key = msg_id @@ -166,7 +209,9 @@ class DirectConsumer(Consumer): class DirectPublisher(Publisher): + """Publishes messages directly on a channel specified by msg_id""" exchange_type = "direct" + def __init__(self, connection=None, msg_id=None): self.routing_key = msg_id self.exchange = msg_id @@ -175,51 +220,62 @@ class DirectPublisher(Publisher): def msg_reply(msg_id, reply=None, failure=None): + """Sends a reply or an error on the channel signified by msg_id + + failure should be a twisted failure object""" if failure: message = failure.getErrorMessage() traceback = failure.getTraceback() logging.error("Returning exception %s to caller", message) logging.error(traceback) - failure = (failure.type.__name__, str(failure.value), traceback) + failure = (failure.type.__name__, str(failure.value), traceback) conn = Connection.instance() publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) - except Exception, exc: + except TypeError: publisher.send( {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), - 'failure': failure - }) + 'failure': failure}) publisher.close() class RemoteError(exception.Error): - """signifies that a remote class has raised an exception""" - def __init__(self, type, value, traceback): - 
self.type = type + """Signifies that a remote class has raised an exception + + Containes a string representation of the type of the original exception, + the value of the original exception, and the traceback. These are + sent to the parent as a joined string so printing the exception + contains all of the relevent info.""" + def __init__(self, exc_type, value, traceback): + self.exc_type = exc_type self.value = value self.traceback = traceback - super(RemoteError, self).__init__("%s %s\n%s" % (type, + super(RemoteError, self).__init__("%s %s\n%s" % (exc_type, value, traceback)) def call(topic, msg): - _log.debug("Making asynchronous call...") + """Sends a message on a topic and wait for a response""" + LOG.debug("Making asynchronous call...") msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) - _log.debug("MSG_ID is %s" % (msg_id)) + LOG.debug("MSG_ID is %s" % (msg_id)) conn = Connection.instance() d = defer.Deferred() consumer = DirectConsumer(connection=conn, msg_id=msg_id) + def deferred_receive(data, message): + """Acks message and callbacks or errbacks""" message.ack() if data['failure']: return d.errback(RemoteError(*data['failure'])) else: return d.callback(data['result']) + consumer.register_callback(deferred_receive) injected = consumer.attach_to_tornado() @@ -233,7 +289,8 @@ def call(topic, msg): def cast(topic, msg): - _log.debug("Making asynchronous cast...") + """Sends a message on a topic without waiting for a response""" + LOG.debug("Making asynchronous cast...") conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) @@ -241,16 +298,18 @@ def cast(topic, msg): def generic_response(message_data, message): - _log.debug('response %s', message_data) + """Logs a result and exits""" + LOG.debug('response %s', message_data) message.ack() sys.exit(0) def send_message(topic, message, wait=True): + """Sends a message for testing""" msg_id = uuid.uuid4().hex message.update({'_msg_id': msg_id}) - 
_log.debug('topic is %s', topic) - _log.debug('message %s', message) + LOG.debug('topic is %s', topic) + LOG.debug('message %s', message) if wait: consumer = messaging.Consumer(connection=Connection.instance(), @@ -273,6 +332,8 @@ def send_message(topic, message, wait=True): consumer.wait() -# TODO: Replace with a docstring test if __name__ == "__main__": + # NOTE(vish): you can send messages from the command line using + # topic and a json sting representing a dictionary + # for the method send_message(sys.argv[1], json.loads(sys.argv[2])) diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index 9c2e29344a..764a97416f 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -15,7 +15,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - +""" +Unit Tests for remote procedure calls using queue +""" import logging from twisted.internet import defer @@ -29,7 +31,8 @@ FLAGS = flags.FLAGS class RpcTestCase(test.BaseTestCase): - def setUp(self): + """Test cases for rpc""" + def setUp(self): # pylint: disable=C0103 super(RpcTestCase, self).setUp() self.conn = rpc.Connection.instance() self.receiver = TestReceiver() @@ -40,23 +43,43 @@ class RpcTestCase(test.BaseTestCase): self.injected.append(self.consumer.attach_to_tornado(self.ioloop)) def test_call_succeed(self): + """Get a value through rpc call""" value = 42 - result = yield rpc.call('test', {"method": "echo", "args": {"value": value}}) + result = yield rpc.call('test', {"method": "echo", + "args": {"value": value}}) self.assertEqual(value, result) def test_call_exception(self): + """Test that exception gets passed back properly + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. 
+ """ value = 42 - self.assertFailure(rpc.call('test', {"method": "fail", "args": {"value": value}}), rpc.RemoteError) + self.assertFailure(rpc.call('test', {"method": "fail", + "args": {"value": value}}), + rpc.RemoteError) try: - yield rpc.call('test', {"method": "fail", "args": {"value": value}}) + yield rpc.call('test', {"method": "fail", + "args": {"value": value}}) self.fail("should have thrown rpc.RemoteError") except rpc.RemoteError as exc: self.assertEqual(int(exc.value), value) + class TestReceiver(object): - def echo(self, value): + """Simple Proxy class so the consumer has methods to call + + Uses static methods because we aren't actually storing any state""" + + @staticmethod + def echo(value): + """Simply returns whatever value is sent in""" logging.debug("Received %s", value) return defer.succeed(value) - def fail(self, value): + @staticmethod + def fail(value): + """Raises an exception with the value sent in""" raise Exception(value) From d744a5e7bd7aef545def85d54c9e1fc3480c55fc Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 13 Aug 2010 14:09:30 -0700 Subject: [PATCH 081/101] Fixes out of order arguments in get_credentials --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 6af092922a..071436b13b 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -206,7 +206,7 @@ class ProjectCommands(object): def zipfile(self, project_id, user_id, filename='nova.zip'): """Exports credentials for project to a zip file arguments: project_id user_id [filename='nova.zip]""" - zip_file = self.manager.get_credentials(project_id, user_id) + zip_file = self.manager.get_credentials(user_id, project_id) with open(filename, 'w') as f: f.write(zip_file) From 8aa4d9c2f9f3f7cadda334a1161d66c2303e2979 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 13 Aug 2010 23:44:14 +0200 Subject: [PATCH 082/101] Remove extra "uml" from os.type. 
--- nova/virt/libvirt.uml.xml.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template index 0bc1507dec..6f4290f985 100644 --- a/nova/virt/libvirt.uml.xml.template +++ b/nova/virt/libvirt.uml.xml.template @@ -2,7 +2,7 @@ %(name)s %(memory_kb)s - %(type)suml + %(type)s /usr/bin/linux /dev/ubda1 From 2dd318827965f20d9a64e624e15dc1a1fee7bf5e Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 13 Aug 2010 23:45:05 +0200 Subject: [PATCH 083/101] Refactor LibvirtConnection a little bit for easier testing. --- nova/virt/libvirt_conn.py | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index e2cdaaf7d8..97e1b0ab2e 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -44,10 +44,10 @@ libxml2 = None FLAGS = flags.FLAGS flags.DEFINE_string('libvirt_xml_template', - utils.abspath('compute/libvirt.qemu.xml.template'), + utils.abspath('virt/libvirt.qemu.xml.template'), 'Libvirt XML Template for QEmu/KVM') flags.DEFINE_string('libvirt_uml_xml_template', - utils.abspath('compute/libvirt.uml.xml.template'), + utils.abspath('virt/libvirt.uml.xml.template'), 'Libvirt XML Template for user-mode-linux') flags.DEFINE_string('injected_network_template', utils.abspath('virt/interfaces.template'), @@ -70,25 +70,42 @@ def get_connection(read_only): libxml2 = __import__('libxml2') return LibvirtConnection(read_only) - class LibvirtConnection(object): def __init__(self, read_only): - auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], - 'root', - None] + self.libvirt_uri, template_file = self.get_uri_and_template() + self.libvirt_xml = open(template_file).read() + self._wrapped_conn = None + self.read_only = read_only + + + @property + def _conn(self): + if not self._wrapped_conn: + self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) + return 
self._wrapped_conn + + + def get_uri_and_template(self): if FLAGS.libvirt_type == 'uml': uri = FLAGS.libvirt_uri or 'uml:///system' template_file = FLAGS.libvirt_uml_xml_template else: uri = FLAGS.libvirt_uri or 'qemu:///system' template_file = FLAGS.libvirt_xml_template - self.libvirt_xml = open(template_file).read() + return uri, template_file + + + def _connect(self, uri, read_only): + auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], + 'root', + None] if read_only: - self._conn = libvirt.openReadOnly(uri) + return libvirt.openReadOnly(uri) else: - self._conn = libvirt.openAuth(uri, auth, 0) + return libvirt.openAuth(uri, auth, 0) + def list_instances(self): From 49a20981634e880fa14420f0b18b3c64b1f6c06f Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 13 Aug 2010 23:45:26 +0200 Subject: [PATCH 084/101] Move interfaces template into virt/, too. --- nova/{compute => virt}/interfaces.template | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename nova/{compute => virt}/interfaces.template (100%) diff --git a/nova/compute/interfaces.template b/nova/virt/interfaces.template similarity index 100% rename from nova/compute/interfaces.template rename to nova/virt/interfaces.template From 7bbf2f7f9f7c7c49287519207e56932e28061514 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 13 Aug 2010 23:46:44 +0200 Subject: [PATCH 085/101] Add a few unit tests for libvirt_conn. --- nova/tests/virt_unittest.py | 69 +++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 nova/tests/virt_unittest.py diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py new file mode 100644 index 0000000000..2aab16809c --- /dev/null +++ b/nova/tests/virt_unittest.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import flags +from nova import test +from nova.virt import libvirt_conn + +FLAGS = flags.FLAGS + + +class LibvirtConnTestCase(test.TrialTestCase): + def test_get_uri_and_template(self): + class MockDataModel(object): + def __init__(self): + self.datamodel = { 'name' : 'i-cafebabe', + 'memory_kb' : '1024000', + 'basepath' : '/some/path', + 'bridge_name' : 'br100', + 'mac_address' : '02:12:34:46:56:67', + 'vcpus' : 2 } + + type_uri_map = { 'qemu' : ('qemu:///system', + [lambda s: '' in s, + lambda s: 'type>hvm/usr/bin/kvm' not in s]), + 'kvm' : ('qemu:///system', + [lambda s: '' in s, + lambda s: 'type>hvm/usr/bin/qemu<' not in s]), + 'uml' : ('uml:///system', + [lambda s: '' in s, + lambda s: 'type>uml Date: Sat, 14 Aug 2010 11:46:10 +0100 Subject: [PATCH 086/101] Bug #617776: DescribeImagesResponse contains type element, when it should be called imageType Make the objectstore respond with the field 'imageType' as well as 'type'. The former is the correct one, according to the EC2 API specification for the DescribeImages response. The latter is for compatibility with euca2ools and other clients. 
--- nova/objectstore/handler.py | 18 +++++++++++++++++- nova/objectstore/image.py | 4 ++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index f625a2aa1e..dfe1918e3b 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -269,7 +269,23 @@ class ImagesResource(Resource): images = [i for i in image.Image.all() \ if i.is_authorized(request.context, readonly=True)] - request.write(json.dumps([i.metadata for i in images])) + # Bug #617776: + # We used to have 'type' in the image metadata, but this field + # should be called 'imageType', as per the EC2 specification. + # For compat with old metadata files we copy type to imageType if + # imageType is not present. + # For compat with euca2ools (and any other clients using the + # incorrect name) we copy imageType to type. + # imageType is primary if we end up with both in the metadata file + # (which should never happen). + def decorate(m): + if 'imageType' not in m and 'type' in m: + m[u'imageType'] = m['type'] + elif 'imageType' in m: + m[u'type'] = m['imageType'] + return m + + request.write(json.dumps([decorate(i.metadata) for i in images])) request.finish() return server.NOT_DONE_YET diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index 860298ba64..861eb364fc 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -148,7 +148,7 @@ class Image(object): 'imageOwnerId': 'system', 'isPublic': public, 'architecture': 'x86_64', - 'type': image_type, + 'imageType': image_type, 'state': 'available' } @@ -195,7 +195,7 @@ class Image(object): 'imageOwnerId': context.project.id, 'isPublic': False, # FIXME: grab public from manifest 'architecture': 'x86_64', # FIXME: grab architecture from manifest - 'type' : image_type + 'imageType' : image_type } def write_state(state): From b323a5fc6d08b52bde18c64fea70a7b3421cadc3 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sat, 14 Aug 2010 19:04:19 +0100 
Subject: [PATCH 087/101] Bug 617913: RunInstances response doesn't meet EC2 specification Fix the RunInstances response to match the EC2 specification. This involved moving the instance details from to . --- nova/endpoint/cloud.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index ad9188ff3d..eb0c05229c 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -392,7 +392,15 @@ class CloudController(object): @rbac.allow('all') def describe_instances(self, context, **kwargs): - return defer.succeed(self._format_instances(context)) + return defer.succeed(self._format_describe_instances(context)) + + def _format_describe_instances(self, context): + return { 'reservationSet': self._format_instances(context) } + + def _format_run_instances(self, context, reservation_id): + i = self._format_instances(context, reservation_id) + assert len(i) == 1 + return i[0] def _format_instances(self, context, reservation_id = None): reservations = {} @@ -441,8 +449,7 @@ class CloudController(object): reservations[res_id] = r reservations[res_id]['instances_set'].append(i) - instance_response = {'reservationSet': list(reservations.values())} - return instance_response + return list(reservations.values()) @rbac.allow('all') def describe_addresses(self, context, **kwargs): @@ -594,7 +601,7 @@ class CloudController(object): logging.debug("Casting to node for %s's instance with IP of %s" % (context.user.name, inst['private_dns_name'])) # TODO: Make Network figure out the network name from ip. 
- defer.returnValue(self._format_instances(context, reservation_id)) + defer.returnValue(self._format_run_instances(context, reservation_id)) @rbac.allow('projectmanager', 'sysadmin') @defer.inlineCallbacks From b50107ec739bc40e29d76ff56587ddbb478bd878 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sat, 14 Aug 2010 23:23:03 +0100 Subject: [PATCH 088/101] Update cloud_unittest to match renamed internal function. --- nova/tests/cloud_unittest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index 40837405c9..3501771cc7 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -132,7 +132,7 @@ class CloudTestCase(test.BaseTestCase): 'state': 0x01, 'user_data': '' } - rv = self.cloud._format_instances(self.context) + rv = self.cloud._format_describe_instances(self.context) self.assert_(len(rv['reservationSet']) == 0) # simulate launch of 5 instances From d1185adcf6f060c125274d31cf11a4f750521d24 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 15 Aug 2010 23:11:52 +0100 Subject: [PATCH 089/101] Add documentation to spawn, reboot, and destroy stating that those functions should return Deferreds. Update the fake implementations to do so (the libvirt ones already do, and making the xenapi ones do so is the subject of a current merge request). --- nova/virt/fake.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 1058371816..155833f3f5 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -24,6 +24,8 @@ This module also documents the semantics of real hypervisor connections. import logging +from twisted.internet import defer + from nova.compute import power_state @@ -89,10 +91,13 @@ class FakeConnection(object): This function should use the data there to guide the creation of the new instance. 
- Once this function successfully completes, the instance should be + The work will be done asynchronously. This function returns a + Deferred that allows the caller to detect when it is complete. + + Once this successfully completes, the instance should be running (power_state.RUNNING). - If this function fails, any partial instance should be completely + If this fails, any partial instance should be completely cleaned up, and the virtualization platform should be in the state that it was before this call began. """ @@ -100,6 +105,7 @@ class FakeConnection(object): fake_instance = FakeInstance() self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING + return defer.succeed(None) def reboot(self, instance): """ @@ -107,8 +113,11 @@ class FakeConnection(object): The given parameter is an instance of nova.compute.service.Instance, and so the instance is being specified as instance.name. + + The work will be done asynchronously. This function returns a + Deferred that allows the caller to detect when it is complete. """ - pass + return defer.succeed(None) def destroy(self, instance): """ @@ -116,8 +125,12 @@ class FakeConnection(object): The given parameter is an instance of nova.compute.service.Instance, and so the instance is being specified as instance.name. + + The work will be done asynchronously. This function returns a + Deferred that allows the caller to detect when it is complete. 
""" del self.instances[instance.name] + return defer.succeed(None) def get_info(self, instance_id): """ From d508418214016d5c00aa8d304f9498f5b99a960b Mon Sep 17 00:00:00 2001 From: andy Date: Mon, 16 Aug 2010 14:16:21 +0200 Subject: [PATCH 090/101] rather comprehensive style fixes --- nova/adminclient.py | 7 +++++++ nova/auth/fakeldap.py | 1 - nova/auth/ldapdriver.py | 1 + nova/auth/manager.py | 12 +++++++----- nova/auth/rbac.py | 2 ++ nova/auth/signer.py | 10 +++++++--- nova/cloudpipe/api.py | 3 ++- nova/cloudpipe/pipelib.py | 2 +- nova/compute/disk.py | 4 ++++ nova/compute/model.py | 2 ++ nova/compute/monitor.py | 35 ++++++++++++++++++++------------- nova/compute/service.py | 1 + nova/crypto.py | 8 +++++++- nova/endpoint/admin.py | 4 ++++ nova/endpoint/api.py | 7 +++++-- nova/endpoint/cloud.py | 3 +-- nova/endpoint/images.py | 7 ++++++- nova/exception.py | 8 ++++++++ nova/fakerabbit.py | 5 +++-- nova/flags.py | 38 +++++++++++++++--------------------- nova/network/exception.py | 12 ++++++------ nova/network/linux_net.py | 6 +++--- nova/network/model.py | 3 ++- nova/network/service.py | 16 +++++++-------- nova/network/vpn.py | 3 +-- nova/objectstore/bucket.py | 1 + nova/objectstore/handler.py | 38 ++++++++++++++++++++++++------------ nova/objectstore/image.py | 1 + nova/objectstore/stored.py | 4 ++-- nova/process.py | 3 +++ nova/rpc.py | 5 +++-- nova/test.py | 5 ++--- nova/utils.py | 12 +++++++++--- nova/validate.py | 1 + nova/virt/images.py | 8 ++++++-- nova/virt/libvirt_conn.py | 23 +++++----------------- nova/virt/xenapi.py | 39 ++++++++++++++++--------------------- nova/volume/service.py | 4 +++- run_tests.py | 9 ++++----- 39 files changed, 208 insertions(+), 145 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index 242298a759..0ca32b1e5e 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -20,6 +20,7 @@ Nova User API client library. 
""" import base64 + import boto from boto.ec2.regioninfo import RegionInfo @@ -57,6 +58,7 @@ class UserInfo(object): elif name == 'secretkey': self.secretkey = str(value) + class UserRole(object): """ Information about a Nova user's role, as parsed through SAX. @@ -79,6 +81,7 @@ class UserRole(object): else: setattr(self, name, str(value)) + class ProjectInfo(object): """ Information about a Nova project, as parsed through SAX @@ -114,12 +117,14 @@ class ProjectInfo(object): else: setattr(self, name, str(value)) + class ProjectMember(object): """ Information about a Nova project member, as parsed through SAX. Fields include: memberId """ + def __init__(self, connection=None): self.connection = connection self.memberId = None @@ -135,6 +140,7 @@ class ProjectMember(object): self.memberId = value else: setattr(self, name, str(value)) + class HostInfo(object): """ @@ -163,6 +169,7 @@ class HostInfo(object): def endElement(self, name, value, connection): setattr(self, name, value) + class NovaAdminClient(object): def __init__(self, clc_ip='127.0.0.1', region='nova', access_key='admin', secret_key='admin', **kwargs): diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index b420924aff..bc744fa013 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -219,7 +219,6 @@ class FakeLDAP(object): raise NO_SUCH_OBJECT() return objects - @property def __redis_prefix(self): return 'ldap:' diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 453fa196ca..6bf7fcd1ea 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -30,6 +30,7 @@ import sys from nova import exception from nova import flags + FLAGS = flags.FLAGS flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 064fd78bca..80ee788966 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -37,7 +37,6 @@ from nova.network import vpn FLAGS = flags.FLAGS - 
flags.DEFINE_list('allowed_roles', ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'], 'Allowed roles for project') @@ -52,7 +51,6 @@ flags.DEFINE_list('superuser_roles', ['cloudadmin'], flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'], 'Roles that apply to all projects') - flags.DEFINE_string('credentials_template', utils.abspath('auth/novarc.template'), 'Template for creating users rc file') @@ -67,15 +65,14 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem', 'Filename of certificate in credentials zip') flags.DEFINE_string('credential_rc_file', 'novarc', 'Filename of rc in credentials zip') - flags.DEFINE_string('credential_cert_subject', '/C=US/ST=California/L=MountainView/O=AnsoLabs/' 'OU=NovaDev/CN=%s-%s', 'Subject for certificate for users') - flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver', 'Driver that auth manager uses') + class AuthBase(object): """Base class for objects relating to auth @@ -83,6 +80,7 @@ class AuthBase(object): an id member. They may optionally contain methods that delegate to AuthManager, but should not implement logic themselves. """ + @classmethod def safe_id(cls, obj): """Safe get object id @@ -100,6 +98,7 @@ class AuthBase(object): class User(AuthBase): """Object representing a user""" + def __init__(self, id, name, access, secret, admin): AuthBase.__init__(self) self.id = id @@ -161,6 +160,7 @@ class KeyPair(AuthBase): Even though this object is named KeyPair, only the public key and fingerprint is stored. The user's private key is not saved. 
""" + def __init__(self, id, name, owner_id, public_key, fingerprint): AuthBase.__init__(self) self.id = id @@ -179,6 +179,7 @@ class KeyPair(AuthBase): class Project(AuthBase): """Represents a Project returned from the datastore""" + def __init__(self, id, name, project_manager_id, description, member_ids): AuthBase.__init__(self) self.id = id @@ -227,7 +228,6 @@ class Project(AuthBase): self.member_ids) - class AuthManager(object): """Manager Singleton for dealing with Users, Projects, and Keypairs @@ -239,7 +239,9 @@ class AuthManager(object): AuthManager also manages associated data related to Auth objects that need to be more accessible, such as vpn ips and ports. """ + _instance = None + def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" if not cls._instance: diff --git a/nova/auth/rbac.py b/nova/auth/rbac.py index 7fab9419f9..1446e4e274 100644 --- a/nova/auth/rbac.py +++ b/nova/auth/rbac.py @@ -32,6 +32,7 @@ def allow(*roles): return wrapped_f return wrap + def deny(*roles): def wrap(f): def wrapped_f(self, context, *args, **kwargs): @@ -44,6 +45,7 @@ def deny(*roles): return wrapped_f return wrap + def __matches_role(context, role): if role == 'all': return True diff --git a/nova/auth/signer.py b/nova/auth/signer.py index 634f22f0d7..8334806d2a 100644 --- a/nova/auth/signer.py +++ b/nova/auth/signer.py @@ -48,11 +48,15 @@ import hashlib import hmac import logging import urllib -import boto # NOTE(vish): for new boto -import boto.utils # NOTE(vish): for old boto + +# NOTE(vish): for new boto +import boto +# NOTE(vish): for old boto +import boto.utils from nova.exception import Error + class Signer(object): """ hacked up code from boto/connection.py """ @@ -77,7 +81,6 @@ class Signer(object): return self._calc_signature_2(params, verb, server_string, path) raise Error('Unknown Signature Version: %s' % self.SignatureVersion) - def _get_utf8_value(self, value): if not isinstance(value, str) and not isinstance(value, unicode): value = 
str(value) @@ -133,5 +136,6 @@ class Signer(object): logging.debug('base64 encoded digest: %s' % b64) return b64 + if __name__ == '__main__': print Signer('foo').generate({"SignatureMethod": 'HmacSHA256', 'SignatureVersion': '2'}, "get", "server", "/foo") diff --git a/nova/cloudpipe/api.py b/nova/cloudpipe/api.py index 0bffe9aa34..56aa898341 100644 --- a/nova/cloudpipe/api.py +++ b/nova/cloudpipe/api.py @@ -21,9 +21,10 @@ Tornado REST API Request Handlers for CloudPipe """ import logging -import tornado.web import urllib +import tornado.web + from nova import crypto from nova.auth import manager diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 5b0ed34713..2867bcb21f 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -36,11 +36,11 @@ from nova.endpoint import api FLAGS = flags.FLAGS - flags.DEFINE_string('boot_script_template', utils.abspath('cloudpipe/bootscript.sh'), 'Template for script to run on cloudpipe instance boot') + class CloudPipe(object): def __init__(self, cloud_controller): self.controller = cloud_controller diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 1ffcca685c..c340c5a795 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -24,6 +24,7 @@ Includes injection of SSH PGP keys into authorized_keys file. import logging import os import tempfile + from twisted.internet import defer from nova import exception @@ -84,6 +85,7 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None): yield execute('dd if=%s of=%s bs=%d seek=%d conv=notrunc,fsync' % (infile, outfile, sector_size, primary_first)) + @defer.inlineCallbacks def inject_data(image, key=None, net=None, partition=None, execute=None): """Injects a ssh key and optionally net data into a disk image. 
@@ -137,6 +139,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None): # remove loopback yield execute('sudo losetup -d %s' % device) + @defer.inlineCallbacks def _inject_key_into_fs(key, fs, execute=None): sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') @@ -146,6 +149,7 @@ def _inject_key_into_fs(key, fs, execute=None): keyfile = os.path.join(sshdir, 'authorized_keys') yield execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') + @defer.inlineCallbacks def _inject_net_into_fs(net, fs, execute=None): netfile = os.path.join(os.path.join(os.path.join( diff --git a/nova/compute/model.py b/nova/compute/model.py index 266a93b9a3..84432b55f0 100644 --- a/nova/compute/model.py +++ b/nova/compute/model.py @@ -168,6 +168,7 @@ class Instance(datastore.BasicModel): self.unassociate_with("ip", self.state['private_dns_name']) return super(Instance, self).destroy() + class Host(datastore.BasicModel): """A Host is the machine where a Daemon is running.""" @@ -235,6 +236,7 @@ class Daemon(datastore.BasicModel): for x in cls.associated_to("host", hostname): yield x + class SessionToken(datastore.BasicModel): """This is a short-lived auth token that is passed through web requests""" diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 19e1a483df..2688649009 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -24,14 +24,15 @@ Instance Monitoring: in the object store. 
""" -import boto -import boto.s3 import datetime import logging import os -import rrdtool import sys import time + +import boto +import boto.s3 +import rrdtool from twisted.internet import defer from twisted.internet import task from twisted.application import service @@ -41,13 +42,12 @@ from nova.virt import connection as virt_connection FLAGS = flags.FLAGS -flags.DEFINE_integer( - 'monitoring_instances_delay', 5, 'Sleep time between updates') -flags.DEFINE_integer( - 'monitoring_instances_step', 300, 'Interval of RRD updates') -flags.DEFINE_string( - 'monitoring_rrd_path', '/var/nova/monitor/instances', - 'Location of RRD files') +flags.DEFINE_integer('monitoring_instances_delay', 5, + 'Sleep time between updates') +flags.DEFINE_integer('monitoring_instances_step', 300, + 'Interval of RRD updates') +flags.DEFINE_string('monitoring_rrd_path', '/var/nova/monitor/instances', + 'Location of RRD files') RRD_VALUES = { @@ -61,7 +61,7 @@ RRD_VALUES = { 'RRA:MAX:0.5:6:800', 'RRA:MAX:0.5:24:800', 'RRA:MAX:0.5:288:800', - ], + ], 'net': [ 'DS:rx:COUNTER:600:0:1250000', 'DS:tx:COUNTER:600:0:1250000', @@ -73,7 +73,7 @@ RRD_VALUES = { 'RRA:MAX:0.5:6:800', 'RRA:MAX:0.5:24:800', 'RRA:MAX:0.5:288:800', - ], + ], 'disk': [ 'DS:rd:COUNTER:600:U:U', 'DS:wr:COUNTER:600:U:U', @@ -85,12 +85,13 @@ RRD_VALUES = { 'RRA:MAX:0.5:6:800', 'RRA:MAX:0.5:24:800', 'RRA:MAX:0.5:444:800', - ] -} + ] + } utcnow = datetime.datetime.utcnow + def update_rrd(instance, name, data): """ Updates the specified RRD file. @@ -106,6 +107,7 @@ def update_rrd(instance, name, data): '%d:%s' % (timestamp, data) ) + def init_rrd(instance, name): """ Initializes the specified RRD file. 
@@ -124,6 +126,7 @@ def init_rrd(instance, name): '--start', '0', *RRD_VALUES[name] ) + def graph_cpu(instance, duration): """ @@ -148,6 +151,7 @@ def graph_cpu(instance, duration): store_graph(instance.instance_id, filename) + def graph_net(instance, duration): """ Creates a graph of network usage for the specified instance and duration. @@ -174,6 +178,7 @@ def graph_net(instance, duration): ) store_graph(instance.instance_id, filename) + def graph_disk(instance, duration): """ @@ -202,6 +207,7 @@ def graph_disk(instance, duration): store_graph(instance.instance_id, filename) + def store_graph(instance_id, filename): """ Transmits the specified graph file to internal object store on cloud @@ -387,6 +393,7 @@ class InstanceMonitor(object, service.Service): """ Monitors the running instances of the current machine. """ + def __init__(self): """ Initialize the monitoring loop. diff --git a/nova/compute/service.py b/nova/compute/service.py index 820116453d..e59f3fb348 100644 --- a/nova/compute/service.py +++ b/nova/compute/service.py @@ -29,6 +29,7 @@ import json import logging import os import sys + from twisted.internet import defer from twisted.internet import task diff --git a/nova/crypto.py b/nova/crypto.py index cc84f5e45c..b05548ea1a 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -24,7 +24,6 @@ SSH keypairs and x509 certificates. 
import base64 import hashlib import logging -import M2Crypto import os import shutil import struct @@ -32,6 +31,8 @@ import tempfile import time import utils +import M2Crypto + from nova import exception from nova import flags @@ -42,11 +43,13 @@ flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our ke flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA') flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?') + def ca_path(project_id): if project_id: return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id) return "%s/cacert.pem" % (FLAGS.ca_path) + def fetch_ca(project_id=None, chain=True): if not FLAGS.use_intermediate_ca: project_id = None @@ -60,6 +63,7 @@ def fetch_ca(project_id=None, chain=True): buffer += cafile.read() return buffer + def generate_key_pair(bits=1024): # what is the magic 65537? @@ -109,6 +113,7 @@ def generate_x509_cert(subject, bits=1024): shutil.rmtree(tmpdir) return (private_key, csr) + def sign_csr(csr_text, intermediate=None): if not FLAGS.use_intermediate_ca: intermediate = None @@ -122,6 +127,7 @@ def sign_csr(csr_text, intermediate=None): os.chdir(start) return _sign_csr(csr_text, user_ca) + def _sign_csr(csr_text, ca_folder): tmpfolder = tempfile.mkdtemp() csrfile = open("%s/inbound.csr" % (tmpfolder), "w") diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index 4f4824fcaf..d6f622755f 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -37,6 +37,7 @@ def user_dict(user, base64_file=None): else: return {} + def project_dict(project): """Convert the project object to a result dict""" if project: @@ -47,6 +48,7 @@ def project_dict(project): else: return {} + def host_dict(host): """Convert a host model object to a result dict""" if host: @@ -54,6 +56,7 @@ def host_dict(host): else: return {} + def admin_only(target): """Decorator for admin-only API calls""" def wrapper(*args, **kwargs): @@ -66,6 +69,7 @@ 
def admin_only(target): return wrapper + class AdminController(object): """ API Controller for users, hosts, nodes, and workers. diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py index 78a18b9ea0..40be00bb79 100755 --- a/nova/endpoint/api.py +++ b/nova/endpoint/api.py @@ -25,12 +25,13 @@ import logging import multiprocessing import random import re -import tornado.web -from twisted.internet import defer import urllib # TODO(termie): replace minidom with etree from xml.dom import minidom +import tornado.web +from twisted.internet import defer + from nova import crypto from nova import exception from nova import flags @@ -43,6 +44,7 @@ from nova.endpoint import cloud FLAGS = flags.FLAGS flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') + _log = logging.getLogger("api") _log.setLevel(logging.DEBUG) @@ -227,6 +229,7 @@ class MetadataRequestHandler(tornado.web.RequestHandler): self.print_data(data) self.finish() + class APIRequestHandler(tornado.web.RequestHandler): def get(self, controller_name): self.execute(controller_name) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 5366acec79..a3d6d1aabd 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -26,6 +26,7 @@ import base64 import logging import os import time + from twisted.internet import defer from nova import datastore @@ -44,7 +45,6 @@ from nova.volume import service FLAGS = flags.FLAGS - flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') @@ -362,7 +362,6 @@ class CloudController(object): 'status': volume['attach_status'], 'volumeId': volume_id}) - @rbac.allow('projectmanager', 'sysadmin') def detach_volume(self, context, volume_id, **kwargs): volume = self._get_volume(context, volume_id) diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py index fe7cb5d113..2a88d66af0 100644 --- a/nova/endpoint/images.py +++ b/nova/endpoint/images.py @@ -21,10 +21,11 @@ Proxy AMI-related calls from the cloud controller, to the running 
objectstore daemon. """ -import boto.s3.connection import json import urllib +import boto.s3.connection + from nova import flags from nova import utils from nova.auth import manager @@ -32,6 +33,7 @@ from nova.auth import manager FLAGS = flags.FLAGS + def modify(context, image_id, operation): conn(context).make_request( method='POST', @@ -53,6 +55,7 @@ def register(context, image_location): return image_id + def list(context, filter_list=[]): """ return a list of all images that a user can see @@ -68,6 +71,7 @@ def list(context, filter_list=[]): return [i for i in result if i['imageId'] in filter_list] return result + def deregister(context, image_id): """ unregister an image """ conn(context).make_request( @@ -75,6 +79,7 @@ def deregister(context, image_id): bucket='_images', query_args=qs({'image_id': image_id})) + def conn(context): access = manager.AuthManager().get_access_key(context.user, context.project) diff --git a/nova/exception.py b/nova/exception.py index 52497a19e3..29bcb17f84 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -25,31 +25,39 @@ import logging import sys import traceback + class Error(Exception): def __init__(self, message=None): super(Error, self).__init__(message) + class ApiError(Error): def __init__(self, message='Unknown', code='Unknown'): self.message = message self.code = code super(ApiError, self).__init__('%s: %s'% (code, message)) + class NotFound(Error): pass + class Duplicate(Error): pass + class NotAuthorized(Error): pass + class NotEmpty(Error): pass + class Invalid(Error): pass + def wrap_exception(f): def _wrap(*args, **kw): try: diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 689194513c..0680252490 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -16,12 +16,13 @@ # License for the specific language governing permissions and limitations # under the License. -""" Based a bit on the carrot.backeds.queue backend... but a lot better """ +"""Based a bit on the carrot.backeds.queue backend... 
but a lot better.""" -from carrot.backends import base import logging import Queue as queue +from carrot.backends import base + class Message(base.BaseMessage): pass diff --git a/nova/flags.py b/nova/flags.py index b3bdd088f4..e3feb252de 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -175,29 +175,25 @@ DEFINE_string('network_topic', 'network', 'the topic network nodes listen on') DEFINE_bool('verbose', False, 'show debug output') DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit') -DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses') +DEFINE_bool('fake_network', False, + 'should we use fake network devices and addresses') DEFINE_string('rabbit_host', 'localhost', 'rabbit host') DEFINE_integer('rabbit_port', 5672, 'rabbit port') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('ec2_url', - 'http://127.0.0.1:8773/services/Cloud', - 'Url to ec2 api server') +DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud', + 'Url to ec2 api server') -DEFINE_string('default_image', - 'ami-11111', - 'default image to use, testing only') -DEFINE_string('default_kernel', - 'aki-11111', - 'default kernel to use, testing only') -DEFINE_string('default_ramdisk', - 'ari-11111', - 'default ramdisk to use, testing only') -DEFINE_string('default_instance_type', - 'm1.small', - 'default instance type to use, testing only') +DEFINE_string('default_image', 'ami-11111', + 'default image to use, testing only') +DEFINE_string('default_kernel', 'aki-11111', + 'default kernel to use, testing only') +DEFINE_string('default_ramdisk', 'ari-11111', + 'default ramdisk to use, testing only') +DEFINE_string('default_instance_type', 'm1.small', + 'default instance type to use, testing only') DEFINE_string('vpn_image_id', 
'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', @@ -207,10 +203,8 @@ DEFINE_string('vpn_key_suffix', DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') # UNUSED -DEFINE_string('node_availability_zone', - 'nova', - 'availability zone of this node') -DEFINE_string('node_name', - socket.gethostname(), - 'name of this node') +DEFINE_string('node_availability_zone', 'nova', + 'availability zone of this node') +DEFINE_string('node_name', socket.gethostname(), + 'name of this node') diff --git a/nova/network/exception.py b/nova/network/exception.py index 8d7aa14985..2a3f5ec144 100644 --- a/nova/network/exception.py +++ b/nova/network/exception.py @@ -20,29 +20,29 @@ Exceptions for network errors. """ -from nova.exception import Error +from nova import exception -class NoMoreAddresses(Error): +class NoMoreAddresses(exception.Error): """No More Addresses are available in the network""" pass -class AddressNotAllocated(Error): +class AddressNotAllocated(exception.Error): """The specified address has not been allocated""" pass -class AddressAlreadyAssociated(Error): +class AddressAlreadyAssociated(exception.Error): """The specified address has already been associated""" pass -class AddressNotAssociated(Error): +class AddressNotAssociated(exception.Error): """The specified address is not associated""" pass -class NotValidNetworkSize(Error): +class NotValidNetworkSize(exception.Error): """The network size is not valid""" pass diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4ebc2097b0..b5385fcabb 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -18,16 +18,16 @@ Implements vlans, bridges, and iptables rules using linux utilities. """ import logging -import signal import os +import signal -# todo(ja): does the definition of network_path belong here? +# TODO(ja): does the definition of network_path belong here? 
from nova import flags from nova import utils -FLAGS = flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_string('dhcpbridge_flagfile', '/etc/nova/nova-dhcpbridge.conf', 'location of flagfile for dhcpbridge') diff --git a/nova/network/model.py b/nova/network/model.py index ce93450674..0900e1513d 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -20,11 +20,11 @@ Model Classes for network control, including VLANs, DHCP, and IP allocation. """ -import IPy import logging import os import time +import IPy from nova import datastore from nova import exception as nova_exception from nova import flags @@ -53,6 +53,7 @@ flags.DEFINE_integer('cnt_vpn_clients', 5, flags.DEFINE_integer('cloudpipe_start_port', 12000, 'Starting port for mapped CloudPipe external ports') + logging.getLogger().setLevel(logging.DEBUG) diff --git a/nova/network/service.py b/nova/network/service.py index 9c0f5520bf..22e84477fd 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -21,17 +21,17 @@ Network Hosts are responsible for allocating ips and setting up network """ from nova import datastore +from nova import exception from nova import flags from nova import service from nova import utils from nova.auth import manager -from nova.exception import NotFound from nova.network import exception from nova.network import model from nova.network import vpn -FLAGS = flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_string('network_type', 'flat', 'Service Class for Networking') @@ -41,15 +41,15 @@ flags.DEFINE_list('flat_network_ips', ['192.168.0.2', '192.168.0.3', '192.168.0.4'], 'Available ips for simple network') flags.DEFINE_string('flat_network_network', '192.168.0.0', - 'Network for simple network') + 'Network for simple network') flags.DEFINE_string('flat_network_netmask', '255.255.255.0', - 'Netmask for simple network') + 'Netmask for simple network') flags.DEFINE_string('flat_network_gateway', '192.168.0.1', - 'Broadcast for simple network') + 'Broadcast for simple 
network') flags.DEFINE_string('flat_network_broadcast', '192.168.0.255', - 'Broadcast for simple network') + 'Broadcast for simple network') flags.DEFINE_string('flat_network_dns', '8.8.4.4', - 'Dns for simple network') + 'Dns for simple network') def type_to_class(network_type): @@ -58,7 +58,7 @@ def type_to_class(network_type): return FlatNetworkService elif network_type == 'vlan': return VlanNetworkService - raise NotFound("Couldn't find %s network type" % network_type) + raise exception.NotFound("Couldn't find %s network type" % network_type) def setup_compute_network(network_type, user_id, project_id, security_group): diff --git a/nova/network/vpn.py b/nova/network/vpn.py index a0e2a7fa18..cf2579e61c 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -23,9 +23,8 @@ from nova import exception from nova import flags from nova import utils + FLAGS = flags.FLAGS - - flags.DEFINE_string('vpn_ip', utils.get_my_ip(), 'Public IP for the cloudpipe VPN servers') flags.DEFINE_integer('vpn_start_port', 1000, diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py index b42a96233a..c2b412dd79 100644 --- a/nova/objectstore/bucket.py +++ b/nova/objectstore/bucket.py @@ -36,6 +36,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('buckets_path', utils.abspath('../buckets'), 'path to s3 buckets') + class Bucket(object): def __init__(self, name): self.name = name diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index dfe1918e3b..035e342cac 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -38,17 +38,19 @@ S3 client with this module:: """ import datetime -import logging import json +import logging import multiprocessing import os -from tornado import escape import urllib -from twisted.application import internet, service -from twisted.web.resource import Resource -from twisted.web import server, static, error - +from tornado import escape +from twisted.application import internet +from twisted.application import 
service +from twisted.web import error +from twisted.web import resource +from twisted.web import server +from twisted.web import static from nova import exception from nova import flags @@ -60,6 +62,7 @@ from nova.objectstore import image FLAGS = flags.FLAGS + def render_xml(request, value): assert isinstance(value, dict) and len(value) == 1 request.setHeader("Content-Type", "application/xml; charset=UTF-8") @@ -72,11 +75,13 @@ def render_xml(request, value): request.write('') request.finish() + def finish(request, content=None): if content: request.write(content) request.finish() + def _render_parts(value, write_cb): if isinstance(value, basestring): write_cb(escape.xhtml_escape(value)) @@ -95,11 +100,13 @@ def _render_parts(value, write_cb): else: raise Exception("Unknown S3 value type %r", value) + def get_argument(request, key, default_value): if key in request.args: return request.args[key][0] return default_value + def get_context(request): try: # Authorization Header format: 'AWS :' @@ -120,13 +127,14 @@ def get_context(request): logging.debug("Authentication Failure: %s" % ex) raise exception.NotAuthorized -class ErrorHandlingResource(Resource): + +class ErrorHandlingResource(resource.Resource): """Maps exceptions to 404 / 401 codes. Won't work for exceptions thrown after NOT_DONE_YET is returned.""" # TODO(unassigned) (calling-all-twisted-experts): This needs to be plugged in to the right place in twisted... 
# This doesn't look like it's the right place (consider exceptions in getChild; or after NOT_DONE_YET is returned def render(self, request): try: - return Resource.render(self, request) + return resource.Resource.render(self, request) except exception.NotFound: request.setResponseCode(404) return '' @@ -134,6 +142,7 @@ class ErrorHandlingResource(Resource): request.setResponseCode(403) return '' + class S3(ErrorHandlingResource): """Implementation of an S3-like storage server based on local files.""" def getChild(self, name, request): @@ -154,9 +163,10 @@ class S3(ErrorHandlingResource): }}) return server.NOT_DONE_YET + class BucketResource(ErrorHandlingResource): def __init__(self, name): - Resource.__init__(self) + resource.Resource.__init__(self) self.name = name def getChild(self, name, request): @@ -206,7 +216,7 @@ class BucketResource(ErrorHandlingResource): class ObjectResource(ErrorHandlingResource): def __init__(self, bucket, name): - Resource.__init__(self) + resource.Resource.__init__(self) self.bucket = bucket self.name = name @@ -245,17 +255,19 @@ class ObjectResource(ErrorHandlingResource): request.setResponseCode(204) return '' + class ImageResource(ErrorHandlingResource): isLeaf = True def __init__(self, name): - Resource.__init__(self) + resource.Resource.__init__(self) self.img = image.Image(name) def render_GET(self, request): return static.File(self.img.image_path, defaultType='application/octet-stream').render_GET(request) -class ImagesResource(Resource): + +class ImagesResource(resource.Resource): def getChild(self, name, request): if name == '': return self @@ -339,11 +351,13 @@ class ImagesResource(Resource): request.setResponseCode(204) return '' + def get_site(): root = S3() site = server.Site(root) return site + def get_application(): factory = get_site() application = service.Application("objectstore") diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index 861eb364fc..fb780a0ec5 100644 --- a/nova/objectstore/image.py 
+++ b/nova/objectstore/image.py @@ -42,6 +42,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('images_path', utils.abspath('../images'), 'path to decrypted images') + class Image(object): def __init__(self, image_id): self.image_id = image_id diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py index 81c047b22c..9829194cb8 100644 --- a/nova/objectstore/stored.py +++ b/nova/objectstore/stored.py @@ -23,7 +23,7 @@ Properties of an object stored within a bucket. import os import nova.crypto -from nova.exception import NotFound, NotAuthorized +from nova import exception class Object(object): @@ -33,7 +33,7 @@ class Object(object): self.key = key self.path = bucket._object_path(key) if not os.path.isfile(self.path): - raise NotFound + raise exception.NotFound def __repr__(self): return "" % (self.bucket, self.key) diff --git a/nova/process.py b/nova/process.py index 2dc56372f3..86f29e2c45 100644 --- a/nova/process.py +++ b/nova/process.py @@ -23,6 +23,7 @@ Process pool, still buggy right now. import logging import multiprocessing import StringIO + from twisted.internet import defer from twisted.internet import error from twisted.internet import process @@ -205,6 +206,7 @@ class ProcessPool(object): self._pool.release() return rv + class SharedPool(object): _instance = None def __init__(self): @@ -213,5 +215,6 @@ class SharedPool(object): def __getattr__(self, key): return getattr(self._instance, key) + def simple_execute(cmd, **kwargs): return SharedPool().simple_execute(cmd, **kwargs) diff --git a/nova/rpc.py b/nova/rpc.py index 4ac546c2a6..824a66b5b8 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -21,12 +21,13 @@ AMQP-based RPC. Queues have consumers and publishers. No fan-out support yet. 
""" -from carrot import connection as carrot_connection -from carrot import messaging import json import logging import sys import uuid + +from carrot import connection as carrot_connection +from carrot import messaging from twisted.internet import defer from twisted.internet import task diff --git a/nova/test.py b/nova/test.py index c7e08734fe..c392c8a84d 100644 --- a/nova/test.py +++ b/nova/test.py @@ -22,11 +22,11 @@ Allows overriding of flags for use of fakes, and some black magic for inline callbacks. """ -import mox -import stubout import sys import time +import mox +import stubout from tornado import ioloop from twisted.internet import defer from twisted.trial import unittest @@ -91,7 +91,6 @@ class TrialTestCase(unittest.TestCase): setattr(FLAGS, k, v) - class BaseTestCase(TrialTestCase): # TODO(jaypipes): Can this be moved into the TrialTestCase class? """Base test case class for all unit tests.""" diff --git a/nova/utils.py b/nova/utils.py index 63db080f11..e826f9b714 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -20,7 +20,7 @@ System-level utilities and helper functions. 
""" -from datetime import datetime, timedelta +import datetime import inspect import logging import os @@ -32,9 +32,11 @@ import sys from nova import exception from nova import flags + FLAGS = flags.FLAGS TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" + def import_class(import_str): """Returns a class from a string including module and class""" mod_str, _sep, class_str = import_str.rpartition('.') @@ -44,6 +46,7 @@ def import_class(import_str): except (ImportError, ValueError, AttributeError): raise exception.NotFound('Class %s cannot be found' % class_str) + def fetchfile(url, target): logging.debug("Fetching %s" % url) # c = pycurl.Curl() @@ -55,6 +58,7 @@ def fetchfile(url, target): # fp.close() execute("curl %s -o %s" % (url, target)) + def execute(cmd, input=None, addl_env=None): env = os.environ.copy() if addl_env: @@ -129,10 +133,12 @@ def get_my_ip(): logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex) return "127.0.0.1" + def isotime(at=None): if not at: - at = datetime.utcnow() + at = datetime.datetime.utcnow() return at.strftime(TIME_FORMAT) + def parse_isotime(timestr): - return datetime.strptime(timestr, TIME_FORMAT) + return datetime.datetime.strptime(timestr, TIME_FORMAT) diff --git a/nova/validate.py b/nova/validate.py index a69306fad0..21f4ed286b 100644 --- a/nova/validate.py +++ b/nova/validate.py @@ -57,6 +57,7 @@ def rangetest(**argchecks): # validate ranges for both+defaults return onCall return onDecorator + def typetest(**argchecks): def onDecorator(func): import sys diff --git a/nova/virt/images.py b/nova/virt/images.py index 1e23c48b91..a3ca72bdd6 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -27,11 +27,11 @@ import urlparse from nova import flags from nova import process -from nova.auth import signer from nova.auth import manager +from nova.auth import signer + FLAGS = flags.FLAGS - flags.DEFINE_bool('use_s3', True, 'whether to get images from s3 or use local copy') @@ -43,6 +43,7 @@ def fetch(image, path, user, project): f = 
_fetch_local_image return f(image, path, user, project) + def _fetch_s3_image(image, path, user, project): url = image_url(image) @@ -66,13 +67,16 @@ def _fetch_s3_image(image, path, user, project): cmd += ['-o', path] return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) + def _fetch_local_image(image, path, user, project): source = _image_path('%s/image' % image) return process.simple_execute('cp %s %s' % (source, path)) + def _image_path(path): return os.path.join(FLAGS.images_path, path) + def image_url(image): return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, image) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 97e1b0ab2e..d1a4a6b676 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -42,6 +42,7 @@ from nova.virt import images libvirt = None libxml2 = None + FLAGS = flags.FLAGS flags.DEFINE_string('libvirt_xml_template', utils.abspath('virt/libvirt.qemu.xml.template'), @@ -57,7 +58,9 @@ flags.DEFINE_string('libvirt_type', 'Libvirt domain type (valid options are: kvm, qemu, uml)') flags.DEFINE_string('libvirt_uri', '', - 'Override the default libvirt URI (which is dependent on libvirt_type)') + 'Override the default libvirt URI (which is dependent' + ' on libvirt_type)') + def get_connection(read_only): # These are loaded late so that there's no need to install these @@ -70,6 +73,7 @@ def get_connection(read_only): libxml2 = __import__('libxml2') return LibvirtConnection(read_only) + class LibvirtConnection(object): def __init__(self, read_only): self.libvirt_uri, template_file = self.get_uri_and_template() @@ -78,14 +82,12 @@ class LibvirtConnection(object): self._wrapped_conn = None self.read_only = read_only - @property def _conn(self): if not self._wrapped_conn: self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn - def get_uri_and_template(self): if FLAGS.libvirt_type == 'uml': uri = FLAGS.libvirt_uri or 'uml:///system' @@ -95,7 
+97,6 @@ class LibvirtConnection(object): template_file = FLAGS.libvirt_xml_template return uri, template_file - def _connect(self, uri, read_only): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', @@ -106,13 +107,10 @@ class LibvirtConnection(object): else: return libvirt.openAuth(uri, auth, 0) - - def list_instances(self): return [self._conn.lookupByID(x).name() for x in self._conn.listDomainsID()] - def destroy(self, instance): try: virt_dom = self._conn.lookupByName(instance.name) @@ -141,14 +139,12 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) return d - def _cleanup(self, instance): target = os.path.abspath(instance.datamodel['basepath']) logging.info("Deleting instance files at %s", target) if os.path.exists(target): shutil.rmtree(target) - @defer.inlineCallbacks @exception.wrap_exception def reboot(self, instance): @@ -174,7 +170,6 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) yield d - @defer.inlineCallbacks @exception.wrap_exception def spawn(self, instance): @@ -205,7 +200,6 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) yield local_d - @defer.inlineCallbacks def _create_image(self, instance, libvirt_xml): # syntactic nicety @@ -260,11 +254,9 @@ class LibvirtConnection(object): yield disk.partition( basepath('disk-raw'), basepath('disk'), bytes, execute=execute) - def basepath(self, instance, path=''): return os.path.abspath(os.path.join(instance.datamodel['basepath'], path)) - def toXml(self, instance): # TODO(termie): cache? 
logging.debug("Starting the toXML method") @@ -279,7 +271,6 @@ class LibvirtConnection(object): return libvirt_xml - def get_info(self, instance_id): virt_dom = self._conn.lookupByName(instance_id) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() @@ -289,7 +280,6 @@ class LibvirtConnection(object): 'num_cpu': num_cpu, 'cpu_time': cpu_time} - def get_disks(self, instance_id): """ Note that this function takes an instance ID, not an Instance, so @@ -332,7 +322,6 @@ class LibvirtConnection(object): return disks - def get_interfaces(self, instance_id): """ Note that this function takes an instance ID, not an Instance, so @@ -375,7 +364,6 @@ class LibvirtConnection(object): return interfaces - def block_stats(self, instance_id, disk): """ Note that this function takes an instance ID, not an Instance, so @@ -384,7 +372,6 @@ class LibvirtConnection(object): domain = self._conn.lookupByName(instance_id) return domain.blockStats(disk) - def interface_stats(self, instance_id, interface): """ Note that this function takes an instance ID, not an Instance, so diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 9fe15644f4..2f59949831 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -33,16 +33,29 @@ from nova.virt import images XenAPI = None + FLAGS = flags.FLAGS flags.DEFINE_string('xenapi_connection_url', None, - 'URL for connection to XenServer/Xen Cloud Platform. Required if connection_type=xenapi.') + 'URL for connection to XenServer/Xen Cloud Platform.' + ' Required if connection_type=xenapi.') flags.DEFINE_string('xenapi_connection_username', 'root', - 'Username for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.') + 'Username for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') flags.DEFINE_string('xenapi_connection_password', None, - 'Password for connection to XenServer/Xen Cloud Platform. 
Used only if connection_type=xenapi.') + 'Password for connection to XenServer/Xen Cloud Platform.' + ' Used only if connection_type=xenapi.') + + +XENAPI_POWER_STATE = { + 'Halted' : power_state.SHUTDOWN, + 'Running' : power_state.RUNNING, + 'Paused' : power_state.PAUSED, + 'Suspended': power_state.SHUTDOWN, # FIXME + 'Crashed' : power_state.CRASHED +} def get_connection(_): @@ -62,7 +75,6 @@ def get_connection(_): class XenAPIConnection(object): - def __init__(self, url, user, pw): self._conn = XenAPI.Session(url) self._conn.login_with_password(user, pw) @@ -107,7 +119,6 @@ class XenAPIConnection(object): yield self._create_vif(vm_ref, network_ref, mac_address) yield self._conn.xenapi.VM.start(vm_ref, False, False) - def create_vm(self, instance, kernel, ramdisk): mem = str(long(instance.datamodel['memory_kb']) * 1024) vcpus = str(instance.datamodel['vcpus']) @@ -145,7 +156,6 @@ class XenAPIConnection(object): logging.debug('Created VM %s as %s.', instance.name, vm_ref) return vm_ref - def create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): vbd_rec = {} vbd_rec['VM'] = vm_ref @@ -166,7 +176,6 @@ class XenAPIConnection(object): vdi_ref) return vbd_ref - def _create_vif(self, vm_ref, network_ref, mac_address): vif_rec = {} vif_rec['device'] = '0' @@ -184,7 +193,6 @@ class XenAPIConnection(object): vm_ref, network_ref) return vif_ref - def _find_network_with_bridge(self, bridge): expr = 'field "bridge" = "%s"' % bridge networks = self._conn.xenapi.network.get_all_records_where(expr) @@ -195,7 +203,6 @@ class XenAPIConnection(object): else: raise Exception('Found no network for bridge %s' % bridge) - def fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. 
The former is for VM disks, the latter for @@ -213,7 +220,6 @@ class XenAPIConnection(object): args['add_partition'] = 'true' return self._call_plugin('objectstore', fn, args) - def reboot(self, instance): vm = self.lookup(instance.name) if vm is None: @@ -231,7 +237,7 @@ class XenAPIConnection(object): if vm is None: raise Exception('instance not present %s' % instance_id) rec = self._conn.xenapi.VM.get_record(vm) - return {'state': power_state_from_xenapi[rec['power_state']], + return {'state': XENAPI_POWER_STATE[rec['power_state']], 'max_mem': long(rec['memory_static_max']) >> 10, 'mem': long(rec['memory_dynamic_max']) >> 10, 'num_cpu': rec['VCPUs_max'], @@ -247,26 +253,15 @@ class XenAPIConnection(object): else: return vms[0] - def _call_plugin(self, plugin, fn, args): return _unwrap_plugin_exceptions( self._conn.xenapi.host.call_plugin, self._get_xenapi_host(), plugin, fn, args) - def _get_xenapi_host(self): return self._conn.xenapi.session.get_this_host(self._conn.handle) -power_state_from_xenapi = { - 'Halted' : power_state.SHUTDOWN, - 'Running' : power_state.RUNNING, - 'Paused' : power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME - 'Crashed' : power_state.CRASHED -} - - def _unwrap_plugin_exceptions(func, *args, **kwargs): try: return func(*args, **kwargs) diff --git a/nova/volume/service.py b/nova/volume/service.py index 66163a8121..104bafe902 100644 --- a/nova/volume/service.py +++ b/nova/volume/service.py @@ -65,6 +65,7 @@ flags.DEFINE_boolean('fake_storage', False, class NoMoreBlades(exception.Error): pass + def get_volume(volume_id): """ Returns a redis-backed volume object """ volume_class = Volume @@ -75,6 +76,7 @@ def get_volume(volume_id): return vol raise exception.Error("Volume does not exist") + class VolumeService(service.Service): """ There is one VolumeNode running on each host. 
@@ -142,6 +144,7 @@ class VolumeService(service.Service): "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev)) + class Volume(datastore.BasicModel): def __init__(self, volume_id=None): @@ -297,7 +300,6 @@ class Volume(datastore.BasicModel): self['blade_id']), error_ok=1) - class FakeVolume(Volume): def _create_lv(self): pass diff --git a/run_tests.py b/run_tests.py index d90ac81750..77aa9088aa 100644 --- a/run_tests.py +++ b/run_tests.py @@ -38,11 +38,11 @@ Due to our use of multiprocessing it we frequently get some ignorable 'Interrupted system call' exceptions after test completion. """ + import __main__ import os import sys - from twisted.scripts import trial as trial_script from nova import datastore @@ -65,13 +65,12 @@ from nova.tests.volume_unittest import * FLAGS = flags.FLAGS - flags.DEFINE_bool('flush_db', True, 'Flush the database before running fake tests') - flags.DEFINE_string('tests_stderr', 'run_tests.err.log', - 'Path to where to pipe STDERR during test runs. ' - 'Default = "run_tests.err.log"') + 'Path to where to pipe STDERR during test runs.' 
+ ' Default = "run_tests.err.log"') + if __name__ == '__main__': OptionsClass = twistd.WrapTwistedOptions(trial_script.Options) From 5c4a806c852a1c7180bc1c7e2ea8f065198e36d2 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 16 Aug 2010 10:57:42 -0400 Subject: [PATCH 091/101] PEP8 and name corrections --- bin/nova-rsapi | 2 +- nova/endpoint/aws/__init__.py | 4 ++-- nova/endpoint/rackspace/__init__.py | 10 +++++----- nova/endpoint/rackspace/controllers/base.py | 4 ++-- nova/wsgi.py | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/bin/nova-rsapi b/bin/nova-rsapi index 3fc61860eb..a35936effa 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -32,4 +32,4 @@ flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') if __name__ == '__main__': utils.default_flagfile() - wsgi.run_server(nova.endpoint.ApiVersionRouter(), FLAGS.cc_port) + wsgi.run_server(nova.endpoint.APIVersionRouter(), FLAGS.cc_port) diff --git a/nova/endpoint/aws/__init__.py b/nova/endpoint/aws/__init__.py index f49270a308..4507cae628 100644 --- a/nova/endpoint/aws/__init__.py +++ b/nova/endpoint/aws/__init__.py @@ -4,7 +4,7 @@ import webob.dec from nova import wsgi # TODO(gundlach): temp -class Api(wsgi.Router): +class API(wsgi.Router): """WSGI entry point for all AWS API requests.""" def __init__(self): @@ -14,7 +14,7 @@ class Api(wsgi.Router): targets = {"dummy": self.dummy } - super(Api, self).__init__(mapper, targets) + super(API, self).__init__(mapper, targets) @webob.dec.wsgify def dummy(self, req): diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py index f14f6218c6..162b35caaf 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -37,12 +37,12 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') -class Api(wsgi.Middleware): +class API(wsgi.Middleware): """WSGI entry point for all Rackspace API requests.""" def __init__(self): - app = 
AuthMiddleware(ApiRouter()) - super(Api, self).__init__(app) + app = AuthMiddleware(APIRouter()) + super(API, self).__init__(app) class AuthMiddleware(wsgi.Middleware): @@ -66,7 +66,7 @@ class AuthMiddleware(wsgi.Middleware): return self.application -class ApiRouter(wsgi.Router): +class APIRouter(wsgi.Router): """ Routes requests on the Rackspace API to the appropriate controller and method. @@ -87,4 +87,4 @@ class ApiRouter(wsgi.Router): 'sharedipgroups': controllers.SharedIpGroupsController() } - super(ApiRouter, self).__init__(mapper, targets) + super(APIRouter, self).__init__(mapper, targets) diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py index 3ada53fd4b..8cd44f62e9 100644 --- a/nova/endpoint/rackspace/controllers/base.py +++ b/nova/endpoint/rackspace/controllers/base.py @@ -1,6 +1,6 @@ -from nova.wsgi import WSGIController +from nova import wsgi -class BaseController(WSGIController): +class BaseController(wsgi.Controller): @classmethod def render(cls, instance): if isinstance(instance, list): diff --git a/nova/wsgi.py b/nova/wsgi.py index 0570e18298..52e155101e 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -146,7 +146,7 @@ class Router(object): Each route in `mapper` must specify a 'controller' string, which is a key into the 'targets' dictionary whose value is a WSGI app to - run. If routing to a WSGIController, you'll want to specify + run. If routing to a wsgi.Controller, you'll want to specify 'action' as well so the controller knows what method to call on itself. @@ -195,7 +195,7 @@ class Router(object): return app -class WSGIController(object): +class Controller(object): """ WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method on itself. 
From f78a8936b1a401f07fc0a09d4bd150d2793e436e Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 16 Aug 2010 13:22:41 -0400 Subject: [PATCH 092/101] All controller actions receive a 'req' parameter containing the webob Request. --- nova/endpoint/__init__.py | 10 ++-- nova/endpoint/aws/__init__.py | 6 +-- nova/endpoint/rackspace/__init__.py | 21 ++++---- .../endpoint/rackspace/controllers/servers.py | 2 +- nova/wsgi.py | 48 +++++++++---------- 5 files changed, 40 insertions(+), 47 deletions(-) diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py index 065f45848e..9aae933afb 100644 --- a/nova/endpoint/__init__.py +++ b/nova/endpoint/__init__.py @@ -36,16 +36,16 @@ import routes from nova.endpoint import rackspace from nova.endpoint import aws -class ApiVersionRouter(wsgi.Router): +class APIVersionRouter(wsgi.Router): """Routes top-level requests to the appropriate API.""" def __init__(self): mapper = routes.Mapper() - mapper.connect(None, "/v1.0/{path_info:.*}", controller="rs") - mapper.connect(None, "/ec2/{path_info:.*}", controller="ec2") + rsapi = rackspace.API() + mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - targets = {"rs": rackspace.Api(), "ec2": aws.Api()} + mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - super(ApiVersionRouter, self).__init__(mapper, targets) + super(APIVersionRouter, self).__init__(mapper) diff --git a/nova/endpoint/aws/__init__.py b/nova/endpoint/aws/__init__.py index 4507cae628..55cbb8fd3a 100644 --- a/nova/endpoint/aws/__init__.py +++ b/nova/endpoint/aws/__init__.py @@ -10,11 +10,9 @@ class API(wsgi.Router): def __init__(self): mapper = routes.Mapper() - mapper.connect(None, "{all:.*}", controller="dummy") + mapper.connect(None, "{all:.*}", controller=self.dummy) - targets = {"dummy": self.dummy } - - super(API, self).__init__(mapper, targets) + super(API, self).__init__(mapper) @webob.dec.wsgify def dummy(self, req): diff --git a/nova/endpoint/rackspace/__init__.py 
b/nova/endpoint/rackspace/__init__.py index 162b35caaf..78b9c94292 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -75,16 +75,13 @@ class APIRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() - mapper.resource("server", "servers") - mapper.resource("image", "images") - mapper.resource("flavor", "flavors") - mapper.resource("sharedipgroup", "sharedipgroups") + mapper.resource("server", "servers", + controller=controllers.ServersController()) + mapper.resource("image", "images", + controller=controllers.ImagesController()) + mapper.resource("flavor", "flavors", + controller=controllers.FlavorsController()) + mapper.resource("sharedipgroup", "sharedipgroups", + controller=controllers.SharedIpGroupsController()) - targets = { - 'servers': controllers.ServersController(), - 'images': controllers.ImagesController(), - 'flavors': controllers.FlavorsController(), - 'sharedipgroups': controllers.SharedIpGroupsController() - } - - super(APIRouter, self).__init__(mapper, targets) + super(APIRouter, self).__init__(mapper) diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/endpoint/rackspace/controllers/servers.py index db02e058d1..2f8e662d69 100644 --- a/nova/endpoint/rackspace/controllers/servers.py +++ b/nova/endpoint/rackspace/controllers/servers.py @@ -5,7 +5,7 @@ from nova.endpoint.rackspace.controllers.base import BaseController class ServersController(BaseController): entity_name = 'servers' - def index(cls): + def index(self, **kwargs): return [instance_details(inst) for inst in compute.InstanceDirectory().all] def show(self, **kwargs): diff --git a/nova/wsgi.py b/nova/wsgi.py index 52e155101e..a0a175dc74 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -140,34 +140,31 @@ class Router(object): WSGI middleware that maps incoming requests to WSGI apps. """ - def __init__(self, mapper, targets): + def __init__(self, mapper): """ Create a router for the given routes.Mapper. 
- Each route in `mapper` must specify a 'controller' string, which is - a key into the 'targets' dictionary whose value is a WSGI app to - run. If routing to a wsgi.Controller, you'll want to specify - 'action' as well so the controller knows what method to call on - itself. + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be a wsgi.Controller, who will route + the request to the action method. Examples: mapper = routes.Mapper() - targets = { "servers": ServerController(), "blog": BlogWsgiApp() } + sc = ServerController() # Explicit mapping of one route to a controller+action - mapper.connect(None, "/svrlist", controller="servers", action="list") + mapper.connect(None, "/svrlist", controller=sc, action="list") - # Controller string is implicitly equal to 2nd param here, and - # actions are all implicitly defined - mapper.resource("server", "servers") + # Actions are all implicitly defined + mapper.resource("server", "servers", controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. - mapper.connect(None, "/v1.0/{path_info:.*}", controller="blog") + mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) """ self.map = mapper - self.targets = targets self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @@ -186,31 +183,32 @@ class Router(object): and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. 
""" - if req.environ['routes.route'] is None: - return webob.exc.HTTPNotFound() match = req.environ['wsgiorg.routing_args'][1] - app_name = match['controller'] - - app = self.targets[app_name] + if not match: + return webob.exc.HTTPNotFound() + app = match['controller'] return app class Controller(object): """ WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method on itself. + and calls the requested action method upon itself. All action methods + must, in addition to their normal parameters, accept a 'req' argument + which is the incoming webob.Request. """ @webob.dec.wsgify def __call__(self, req): """ - Call the method on self specified in req.environ by RoutesMiddleware. + Call the method specified in req.environ by RoutesMiddleware. """ - routes_dict = req.environ['wsgiorg.routing_args'][1] - action = routes_dict['action'] + arg_dict = req.environ['wsgiorg.routing_args'][1] + action = arg_dict['action'] method = getattr(self, action) - del routes_dict['controller'] - del routes_dict['action'] - return method(**routes_dict) + del arg_dict['controller'] + del arg_dict['action'] + arg_dict['req'] = req + return method(**arg_dict) class Serializer(object): From 9878a6b8b4691e206dc5d35c39313880db34f229 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 17 Aug 2010 11:03:15 -0400 Subject: [PATCH 093/101] Simpler installation, and, can run install_venv from anywhere instead of just from checkout root --- tools/install_venv.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tools/install_venv.py b/tools/install_venv.py index e1a270638d..4e775eb334 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -38,15 +38,16 @@ def die(message, *args): def run_command(cmd, redirect_output=True, error_ok=False): - """Runs a command in an out-of-process shell, returning the - output of that command + """ + Runs a command in an out-of-process shell, returning the + output of 
that command. Working directory is ROOT. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None - proc = subprocess.Popen(cmd, stdout=stdout) + proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout) output = proc.communicate()[0] if not error_ok and proc.returncode != 0: die('Command "%s" failed.\n%s', ' '.join(cmd), output) @@ -94,6 +95,12 @@ def install_dependencies(venv=VENV): redirect_output=False) + # Tell the virtual env how to "import nova" + pathfile=os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth") + f=open(pathfile, 'w') + f.write("%s\n" % ROOT) + + def print_help(): help = """ Nova development environment setup is complete. From f92851ba8ffcb530f6f3c4ea354dd89d29146f6c Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 17 Aug 2010 13:03:38 -0400 Subject: [PATCH 094/101] Remove duplicate definition of flag --- nova/endpoint/rackspace/__init__.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py index 78b9c94292..ac53ee10b2 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -33,10 +33,6 @@ from nova.auth import manager from nova.endpoint.rackspace import controllers -FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - - class API(wsgi.Middleware): """WSGI entry point for all Rackspace API requests.""" From e8be36d7a7be2ebbf5493766ce909d7913bf61e0 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 17 Aug 2010 13:23:20 -0400 Subject: [PATCH 095/101] Move eventlet-using class out of endpoint/__init__.py into its own submodule, so that twisted-related code using endpoint.[other stuff] wouldn't run eventlet and make unit tests throw crazy errors about eventlet 0.9.10 not playing nicely with twisted. 
--- bin/nova-rsapi | 5 ++-- nova/endpoint/__init__.py | 51 --------------------------------------- nova/endpoint/newapi.py | 51 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 54 deletions(-) create mode 100644 nova/endpoint/newapi.py diff --git a/bin/nova-rsapi b/bin/nova-rsapi index a35936effa..e2722422ec 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -21,15 +21,14 @@ Daemon for the Rackspace API endpoint. """ -import nova.endpoint - from nova import flags from nova import utils from nova import wsgi +from nova.endpoint import newapi FLAGS = flags.FLAGS flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') if __name__ == '__main__': utils.default_flagfile() - wsgi.run_server(nova.endpoint.APIVersionRouter(), FLAGS.cc_port) + wsgi.run_server(newapi.APIVersionRouter(), FLAGS.cc_port) diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py index 9aae933afb..e69de29bb2 100644 --- a/nova/endpoint/__init__.py +++ b/nova/endpoint/__init__.py @@ -1,51 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`nova.endpoint` -- Main NOVA Api endpoints -===================================================== - -.. automodule:: nova.endpoint - :platform: Unix - :synopsis: REST APIs for all nova functions -.. 
moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. moduleauthor:: Andy Smith -""" - -from nova import wsgi -import routes -from nova.endpoint import rackspace -from nova.endpoint import aws - -class APIVersionRouter(wsgi.Router): - """Routes top-level requests to the appropriate API.""" - - def __init__(self): - mapper = routes.Mapper() - - rsapi = rackspace.API() - mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - - mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - - super(APIVersionRouter, self).__init__(mapper) - diff --git a/nova/endpoint/newapi.py b/nova/endpoint/newapi.py new file mode 100644 index 0000000000..9aae933afb --- /dev/null +++ b/nova/endpoint/newapi.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`nova.endpoint` -- Main NOVA Api endpoints +===================================================== + +.. automodule:: nova.endpoint + :platform: Unix + :synopsis: REST APIs for all nova functions +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. 
moduleauthor:: Andy Smith +""" + +from nova import wsgi +import routes +from nova.endpoint import rackspace +from nova.endpoint import aws + +class APIVersionRouter(wsgi.Router): + """Routes top-level requests to the appropriate API.""" + + def __init__(self): + mapper = routes.Mapper() + + rsapi = rackspace.API() + mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) + + mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) + + super(APIVersionRouter, self).__init__(mapper) + From 1e403e56dc1147ce3feea1b8931948bc35f23a44 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 17 Aug 2010 16:43:37 -0700 Subject: [PATCH 096/101] In an effort to keep new and old API code separate, I've created a nova.api to put all new API code under. This means nova.endpoint only contains the old Tornado implementation. I also cleaned up a few pep8 and other style nits in the new API code. --- bin/{nova-rsapi => nova-api-new} | 8 +-- nova/api/__init__.py | 38 ++++++++++++++ nova/api/ec2/__init__.py | 42 +++++++++++++++ nova/{endpoint => api}/rackspace/__init__.py | 22 ++++---- nova/api/rackspace/controllers/__init__.py | 0 nova/api/rackspace/controllers/base.py | 30 +++++++++++ nova/api/rackspace/controllers/flavors.py | 18 +++++++ nova/api/rackspace/controllers/images.py | 18 +++++++ .../rackspace/controllers/servers.py | 26 ++++++++-- .../rackspace/controllers/sharedipgroups.py | 18 +++++++ nova/endpoint/aws/__init__.py | 22 -------- nova/endpoint/newapi.py | 51 ------------------- .../rackspace/controllers/__init__.py | 5 -- nova/endpoint/rackspace/controllers/base.py | 9 ---- .../endpoint/rackspace/controllers/flavors.py | 1 - nova/endpoint/rackspace/controllers/images.py | 1 - .../rackspace/controllers/sharedipgroups.py | 1 - 17 files changed, 201 insertions(+), 109 deletions(-) rename bin/{nova-rsapi => nova-api-new} (83%) create mode 100644 nova/api/__init__.py create mode 100644 nova/api/ec2/__init__.py rename nova/{endpoint => api}/rackspace/__init__.py 
(80%) create mode 100644 nova/api/rackspace/controllers/__init__.py create mode 100644 nova/api/rackspace/controllers/base.py create mode 100644 nova/api/rackspace/controllers/flavors.py create mode 100644 nova/api/rackspace/controllers/images.py rename nova/{endpoint => api}/rackspace/controllers/servers.py (72%) create mode 100644 nova/api/rackspace/controllers/sharedipgroups.py delete mode 100644 nova/endpoint/aws/__init__.py delete mode 100644 nova/endpoint/newapi.py delete mode 100644 nova/endpoint/rackspace/controllers/__init__.py delete mode 100644 nova/endpoint/rackspace/controllers/base.py delete mode 100644 nova/endpoint/rackspace/controllers/flavors.py delete mode 100644 nova/endpoint/rackspace/controllers/images.py delete mode 100644 nova/endpoint/rackspace/controllers/sharedipgroups.py diff --git a/bin/nova-rsapi b/bin/nova-api-new similarity index 83% rename from bin/nova-rsapi rename to bin/nova-api-new index e2722422ec..fda42339c4 100755 --- a/bin/nova-rsapi +++ b/bin/nova-api-new @@ -18,17 +18,17 @@ # See the License for the specific language governing permissions and # limitations under the License. """ - Daemon for the Rackspace API endpoint. +Nova API daemon. """ +from nova import api from nova import flags from nova import utils from nova import wsgi -from nova.endpoint import newapi FLAGS = flags.FLAGS -flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') +flags.DEFINE_integer('api_port', 8773, 'API port') if __name__ == '__main__': utils.default_flagfile() - wsgi.run_server(newapi.APIVersionRouter(), FLAGS.cc_port) + wsgi.run_server(api.API(), FLAGS.api_port) diff --git a/nova/api/__init__.py b/nova/api/__init__.py new file mode 100644 index 0000000000..a6bb93348b --- /dev/null +++ b/nova/api/__init__.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Root WSGI middleware for all API controllers. +""" + +import routes + +from nova import wsgi +from nova.api import ec2 +from nova.api import rackspace + + +class API(wsgi.Router): + """Routes top-level requests to the appropriate controller.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.connect(None, "/v1.0/{path_info:.*}", + controller=rackspace.API()) + mapper.connect(None, "/ec2/{path_info:.*}", controller=ec2.API()) + super(API, self).__init__(mapper) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py new file mode 100644 index 0000000000..6eec0abf76 --- /dev/null +++ b/nova/api/ec2/__init__.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for EC2 API controllers. 
+""" + +import routes +import webob.dec + +from nova import wsgi + + +class API(wsgi.Router): + """Routes EC2 requests to the appropriate controller.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.connect(None, "{all:.*}", controller=self.dummy) + super(API, self).__init__(mapper) + + @staticmethod + @webob.dec.wsgify + def dummy(req): + """Temporary dummy controller.""" + msg = "dummy response -- please hook up __init__() to cloud.py instead" + return repr({'dummy': msg, + 'kwargs': repr(req.environ['wsgiorg.routing_args'][1])}) diff --git a/nova/endpoint/rackspace/__init__.py b/nova/api/rackspace/__init__.py similarity index 80% rename from nova/endpoint/rackspace/__init__.py rename to nova/api/rackspace/__init__.py index ac53ee10b2..662cbe4958 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/api/rackspace/__init__.py @@ -17,20 +17,23 @@ # under the License. """ -Rackspace API Endpoint +WSGI middleware for Rackspace API controllers. """ import json import time +import routes import webob.dec import webob.exc -import routes from nova import flags from nova import wsgi +from nova.api.rackspace.controllers import flavors +from nova.api.rackspace.controllers import images +from nova.api.rackspace.controllers import servers +from nova.api.rackspace.controllers import sharedipgroups from nova.auth import manager -from nova.endpoint.rackspace import controllers class API(wsgi.Middleware): @@ -70,14 +73,9 @@ class APIRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() - - mapper.resource("server", "servers", - controller=controllers.ServersController()) - mapper.resource("image", "images", - controller=controllers.ImagesController()) - mapper.resource("flavor", "flavors", - controller=controllers.FlavorsController()) + mapper.resource("server", "servers", controller=servers.Controller()) + mapper.resource("image", "images", controller=images.Controller()) + mapper.resource("flavor", "flavors", controller=flavors.Controller()) 
mapper.resource("sharedipgroup", "sharedipgroups", - controller=controllers.SharedIpGroupsController()) - + controller=sharedipgroups.Controller()) super(APIRouter, self).__init__(mapper) diff --git a/nova/api/rackspace/controllers/__init__.py b/nova/api/rackspace/controllers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nova/api/rackspace/controllers/base.py b/nova/api/rackspace/controllers/base.py new file mode 100644 index 0000000000..dd2c6543c5 --- /dev/null +++ b/nova/api/rackspace/controllers/base.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import wsgi + + +class Controller(wsgi.Controller): + """TODO(eday): Base controller for all rackspace controllers. What is this + for? Is this just Rackspace specific? """ + + @classmethod + def render(cls, instance): + if isinstance(instance, list): + return {cls.entity_name: cls.render(instance)} + else: + return {"TODO": "TODO"} diff --git a/nova/api/rackspace/controllers/flavors.py b/nova/api/rackspace/controllers/flavors.py new file mode 100644 index 0000000000..986f11434e --- /dev/null +++ b/nova/api/rackspace/controllers/flavors.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +class Controller(object): pass diff --git a/nova/api/rackspace/controllers/images.py b/nova/api/rackspace/controllers/images.py new file mode 100644 index 0000000000..986f11434e --- /dev/null +++ b/nova/api/rackspace/controllers/images.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +class Controller(object): pass diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/api/rackspace/controllers/servers.py similarity index 72% rename from nova/endpoint/rackspace/controllers/servers.py rename to nova/api/rackspace/controllers/servers.py index 2f8e662d69..1911d5abf7 100644 --- a/nova/endpoint/rackspace/controllers/servers.py +++ b/nova/api/rackspace/controllers/servers.py @@ -1,12 +1,32 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + from nova import rpc from nova.compute import model as compute -from nova.endpoint.rackspace.controllers.base import BaseController +from nova.api.rackspace.controllers import base -class ServersController(BaseController): + +class Controller(base.Controller): entity_name = 'servers' def index(self, **kwargs): - return [instance_details(inst) for inst in compute.InstanceDirectory().all] + instanmces = [] + for inst in compute.InstanceDirectory().all: + instances.append(instance_details(inst)) def show(self, **kwargs): instance_id = kwargs['id'] diff --git a/nova/api/rackspace/controllers/sharedipgroups.py b/nova/api/rackspace/controllers/sharedipgroups.py new file mode 100644 index 0000000000..986f11434e --- /dev/null +++ b/nova/api/rackspace/controllers/sharedipgroups.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +class Controller(object): pass diff --git a/nova/endpoint/aws/__init__.py b/nova/endpoint/aws/__init__.py deleted file mode 100644 index 55cbb8fd3a..0000000000 --- a/nova/endpoint/aws/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -import routes -import webob.dec - -from nova import wsgi - -# TODO(gundlach): temp -class API(wsgi.Router): - """WSGI entry point for all AWS API requests.""" - - def __init__(self): - mapper = routes.Mapper() - - mapper.connect(None, "{all:.*}", controller=self.dummy) - - super(API, self).__init__(mapper) - - @webob.dec.wsgify - def dummy(self, req): - #TODO(gundlach) - msg = "dummy response -- please hook up __init__() to cloud.py instead" - return repr({ 'dummy': msg, - 'kwargs': repr(req.environ['wsgiorg.routing_args'][1]) }) diff --git a/nova/endpoint/newapi.py b/nova/endpoint/newapi.py deleted file mode 100644 index 9aae933afb..0000000000 --- a/nova/endpoint/newapi.py +++ /dev/null @@ -1,51 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`nova.endpoint` -- Main NOVA Api endpoints -===================================================== - -.. 
automodule:: nova.endpoint - :platform: Unix - :synopsis: REST APIs for all nova functions -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. moduleauthor:: Andy Smith -""" - -from nova import wsgi -import routes -from nova.endpoint import rackspace -from nova.endpoint import aws - -class APIVersionRouter(wsgi.Router): - """Routes top-level requests to the appropriate API.""" - - def __init__(self): - mapper = routes.Mapper() - - rsapi = rackspace.API() - mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - - mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - - super(APIVersionRouter, self).__init__(mapper) - diff --git a/nova/endpoint/rackspace/controllers/__init__.py b/nova/endpoint/rackspace/controllers/__init__.py deleted file mode 100644 index 052b6f3655..0000000000 --- a/nova/endpoint/rackspace/controllers/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from nova.endpoint.rackspace.controllers.images import ImagesController -from nova.endpoint.rackspace.controllers.flavors import FlavorsController -from nova.endpoint.rackspace.controllers.servers import ServersController -from nova.endpoint.rackspace.controllers.sharedipgroups import \ - SharedIpGroupsController diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py deleted file mode 100644 index 8cd44f62e9..0000000000 --- a/nova/endpoint/rackspace/controllers/base.py +++ /dev/null @@ -1,9 +0,0 @@ -from nova import wsgi - -class BaseController(wsgi.Controller): - @classmethod - def render(cls, instance): - if isinstance(instance, list): - return { cls.entity_name : cls.render(instance) } - else: - return { "TODO": "TODO" } diff --git a/nova/endpoint/rackspace/controllers/flavors.py b/nova/endpoint/rackspace/controllers/flavors.py deleted file mode 100644 index f256cc8529..0000000000 --- 
a/nova/endpoint/rackspace/controllers/flavors.py +++ /dev/null @@ -1 +0,0 @@ -class FlavorsController(object): pass diff --git a/nova/endpoint/rackspace/controllers/images.py b/nova/endpoint/rackspace/controllers/images.py deleted file mode 100644 index ae2a08849c..0000000000 --- a/nova/endpoint/rackspace/controllers/images.py +++ /dev/null @@ -1 +0,0 @@ -class ImagesController(object): pass diff --git a/nova/endpoint/rackspace/controllers/sharedipgroups.py b/nova/endpoint/rackspace/controllers/sharedipgroups.py deleted file mode 100644 index 9d346d6232..0000000000 --- a/nova/endpoint/rackspace/controllers/sharedipgroups.py +++ /dev/null @@ -1 +0,0 @@ -class SharedIpGroupsController(object): pass From 738bcb7d381a67b0884d861c7ad48fa08e37106a Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 10:39:59 -0400 Subject: [PATCH 097/101] Newest pylint supports 'disable=', not 'disable-msg=' --- bin/nova-rsapi | 2 +- nova/test.py | 10 +++++----- nova/tests/objectstore_unittest.py | 16 ++++++++-------- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/bin/nova-rsapi b/bin/nova-rsapi index e2722422ec..9ad6f9e947 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -1,5 +1,5 @@ #!/usr/bin/env python -# pylint: disable-msg=C0103 +# pylint: disable=C0103 # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the diff --git a/nova/test.py b/nova/test.py index c392c8a84d..a75e0de1a5 100644 --- a/nova/test.py +++ b/nova/test.py @@ -53,7 +53,7 @@ def skip_if_fake(func): class TrialTestCase(unittest.TestCase): """Test case base class for all unit tests""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Run before each test method to initialize test environment""" super(TrialTestCase, self).setUp() @@ -63,7 +63,7 @@ class TrialTestCase(unittest.TestCase): self.stubs = stubout.StubOutForTesting() self.flag_overrides = {} - def tearDown(self): # pylint: 
disable-msg=C0103 + def tearDown(self): # pylint: disable=C0103 """Runs after each test method to finalize/tear down test environment""" super(TrialTestCase, self).tearDown() self.reset_flags() @@ -94,7 +94,7 @@ class TrialTestCase(unittest.TestCase): class BaseTestCase(TrialTestCase): # TODO(jaypipes): Can this be moved into the TrialTestCase class? """Base test case class for all unit tests.""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Run before each test method to initialize test environment""" super(BaseTestCase, self).setUp() # TODO(termie): we could possibly keep a more global registry of @@ -106,7 +106,7 @@ class BaseTestCase(TrialTestCase): self._done_waiting = False self._timed_out = False - def tearDown(self):# pylint: disable-msg=C0103 + def tearDown(self):# pylint: disable=C0103 """Runs after each test method to finalize/tear down test environment""" super(BaseTestCase, self).tearDown() for x in self.injected: @@ -137,7 +137,7 @@ class BaseTestCase(TrialTestCase): if self._waiting: try: self.ioloop.remove_timeout(self._waiting) - except Exception: # pylint: disable-msg=W0703 + except Exception: # pylint: disable=W0703 # TODO(jaypipes): This produces a pylint warning. Should # we really be catching Exception and then passing here? 
pass diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index dece4b5d52..5b956fccfc 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -56,7 +56,7 @@ os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets')) class ObjectStoreTestCase(test.BaseTestCase): """Test objectstore API directly.""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Setup users and projects.""" super(ObjectStoreTestCase, self).setUp() self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), @@ -78,7 +78,7 @@ class ObjectStoreTestCase(test.BaseTestCase): self.context = Context() - def tearDown(self): # pylint: disable-msg=C0103 + def tearDown(self): # pylint: disable=C0103 """Tear down users and projects.""" self.auth_manager.delete_project('proj1') self.auth_manager.delete_project('proj2') @@ -168,7 +168,7 @@ class ObjectStoreTestCase(test.BaseTestCase): class TestHTTPChannel(http.HTTPChannel): """Dummy site required for twisted.web""" - def checkPersistence(self, _, __): # pylint: disable-msg=C0103 + def checkPersistence(self, _, __): # pylint: disable=C0103 """Otherwise we end up with an unclean reactor.""" return False @@ -181,7 +181,7 @@ class TestSite(server.Site): class S3APITestCase(test.TrialTestCase): """Test objectstore through S3 API.""" - def setUp(self): # pylint: disable-msg=C0103 + def setUp(self): # pylint: disable=C0103 """Setup users, projects, and start a test server.""" super(S3APITestCase, self).setUp() @@ -198,7 +198,7 @@ class S3APITestCase(test.TrialTestCase): root = S3() self.site = TestSite(root) - # pylint: disable-msg=E1101 + # pylint: disable=E1101 self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1') # pylint: enable-msg=E1101 @@ -221,11 +221,11 @@ class S3APITestCase(test.TrialTestCase): self.conn.get_http_connection = get_http_connection - def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111 + def 
_ensure_no_buckets(self, buckets): # pylint: disable=C0111 self.assertEquals(len(buckets), 0, "Bucket list was not empty") return True - def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111 + def _ensure_one_bucket(self, buckets, name): # pylint: disable=C0111 self.assertEquals(len(buckets), 1, "Bucket list didn't have exactly one element in it") self.assertEquals(buckets[0].name, name, "Wrong name") @@ -296,7 +296,7 @@ class S3APITestCase(test.TrialTestCase): deferred.addCallback(self._ensure_no_buckets) return deferred - def tearDown(self): # pylint: disable-msg=C0103 + def tearDown(self): # pylint: disable=C0103 """Tear down auth and test server.""" self.auth_manager.delete_user('admin') self.auth_manager.delete_project('admin') From 43d2310f87a2f78f342b171de403f3db74a98295 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 08:39:28 -0700 Subject: [PATCH 098/101] Fixed typo. --- nova/api/rackspace/controllers/servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/rackspace/controllers/servers.py b/nova/api/rackspace/controllers/servers.py index 1911d5abf7..1d0221ea85 100644 --- a/nova/api/rackspace/controllers/servers.py +++ b/nova/api/rackspace/controllers/servers.py @@ -24,7 +24,7 @@ class Controller(base.Controller): entity_name = 'servers' def index(self, **kwargs): - instanmces = [] + instances = [] for inst in compute.InstanceDirectory().all: instances.append(instance_details(inst)) From b380e4a93f6d8ebc772c3989d27f9549b730eee5 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 11:44:24 -0400 Subject: [PATCH 099/101] Changed our minds: keep pylint equal to Ubuntu Lucid version, and use disable-msg throughout. 
--- bin/nova-rsapi | 2 +- nova/network/linux_net.py | 4 ++-- nova/network/model.py | 8 ++++---- nova/network/service.py | 2 +- nova/network/vpn.py | 2 +- nova/rpc.py | 8 ++++---- nova/test.py | 10 +++++----- nova/tests/network_unittest.py | 4 ++-- nova/tests/objectstore_unittest.py | 16 ++++++++-------- nova/tests/rpc_unittest.py | 2 +- nova/wsgi.py | 4 ++-- tools/pip-requires | 2 +- 12 files changed, 32 insertions(+), 32 deletions(-) diff --git a/bin/nova-rsapi b/bin/nova-rsapi index 9ad6f9e947..e2722422ec 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -1,5 +1,5 @@ #!/usr/bin/env python -# pylint: disable=C0103 +# pylint: disable-msg=C0103 # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index a5014b2cb5..9e5aabd97e 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -154,7 +154,7 @@ def start_dnsmasq(network): try: os.kill(pid, signal.SIGHUP) return - except Exception as exc: # pylint: disable=W0703 + except Exception as exc: # pylint: disable-msg=W0703 logging.debug("Hupping dnsmasq threw %s", exc) # FLAGFILE and DNSMASQ_INTERFACE in env @@ -170,7 +170,7 @@ def stop_dnsmasq(network): if pid: try: os.kill(pid, signal.SIGTERM) - except Exception as exc: # pylint: disable=W0703 + except Exception as exc: # pylint: disable-msg=W0703 logging.debug("Killing dnsmasq threw %s", exc) diff --git a/nova/network/model.py b/nova/network/model.py index d3a6a65524..6e4fcc47ea 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -59,7 +59,7 @@ logging.getLogger().setLevel(logging.DEBUG) class Vlan(datastore.BasicModel): """Tracks vlans assigned to project it the datastore""" - def __init__(self, project, vlan): # pylint: disable=W0231 + def __init__(self, project, vlan): # pylint: disable-msg=W0231 """ Since we don't want to try and find a vlan by its identifier, but by a project id, we don't call super-init. 
@@ -161,7 +161,7 @@ class FixedIp(datastore.BasicModel): 'state': 'none'} @classmethod - # pylint: disable=R0913 + # pylint: disable-msg=R0913 def create(cls, user_id, project_id, address, mac, hostname, network_id): """Creates an FixedIp object""" addr = cls(address) @@ -215,7 +215,7 @@ class BaseNetwork(datastore.BasicModel): return {'network_id': self.network_id, 'network_str': self.network_str} @classmethod - # pylint: disable=R0913 + # pylint: disable-msg=R0913 def create(cls, user_id, project_id, security_group, vlan, network_str): """Create a BaseNetwork object""" network_id = "%s:%s" % (project_id, security_group) @@ -268,7 +268,7 @@ class BaseNetwork(datastore.BasicModel): """Returns the project associated with this network""" return manager.AuthManager().get_project(self['project_id']) - # pylint: disable=R0913 + # pylint: disable-msg=R0913 def _add_host(self, user_id, project_id, ip_address, mac, hostname): """Add a host to the datastore""" self.address_class.create(user_id, project_id, ip_address, diff --git a/nova/network/service.py b/nova/network/service.py index da102a0562..d3aa1c46f5 100644 --- a/nova/network/service.py +++ b/nova/network/service.py @@ -195,7 +195,7 @@ class VlanNetworkService(BaseNetworkService): # simplified and improved. Also there it may be useful # to support vlans separately from dhcp, instead of having # both of them together in this class. 
- # pylint: disable=W0221 + # pylint: disable-msg=W0221 def allocate_fixed_ip(self, user_id, project_id, diff --git a/nova/network/vpn.py b/nova/network/vpn.py index cf2579e61c..85366ed892 100644 --- a/nova/network/vpn.py +++ b/nova/network/vpn.py @@ -105,7 +105,7 @@ class NetworkData(datastore.BasicModel): return datastore.Redis.instance().scard('ip:%s:ports' % vpn_ip) @property - def ip(self): # pylint: disable=C0103 + def ip(self): # pylint: disable-msg=C0103 """The ip assigned to the project""" return self['ip'] diff --git a/nova/rpc.py b/nova/rpc.py index 824a66b5b8..84a9b55901 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -59,7 +59,7 @@ class Connection(carrot_connection.BrokerConnection): params['backend_cls'] = fakerabbit.Backend # NOTE(vish): magic is fun! - # pylint: disable=W0142 + # pylint: disable-msg=W0142 cls._instance = cls(**params) return cls._instance @@ -104,7 +104,7 @@ class Consumer(messaging.Consumer): if self.failed_connection: # NOTE(vish): conn is defined in the parent class, we can # recreate it as long as we create the backend too - # pylint: disable=W0201 + # pylint: disable-msg=W0201 self.conn = Connection.recreate() self.backend = self.conn.create_backend() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) @@ -114,7 +114,7 @@ class Consumer(messaging.Consumer): # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. - except Exception: # pylint: disable=W0703 + except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: logging.exception("Failed to fetch message from queue") self.failed_connection = True @@ -178,7 +178,7 @@ class AdapterConsumer(TopicConsumer): node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) # NOTE(vish): magic is fun! 
- # pylint: disable=W0142 + # pylint: disable-msg=W0142 d = defer.maybeDeferred(node_func, **node_args) if msg_id: d.addCallback(lambda rval: msg_reply(msg_id, rval, None)) diff --git a/nova/test.py b/nova/test.py index a75e0de1a5..c392c8a84d 100644 --- a/nova/test.py +++ b/nova/test.py @@ -53,7 +53,7 @@ def skip_if_fake(func): class TrialTestCase(unittest.TestCase): """Test case base class for all unit tests""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Run before each test method to initialize test environment""" super(TrialTestCase, self).setUp() @@ -63,7 +63,7 @@ class TrialTestCase(unittest.TestCase): self.stubs = stubout.StubOutForTesting() self.flag_overrides = {} - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 """Runs after each test method to finalize/tear down test environment""" super(TrialTestCase, self).tearDown() self.reset_flags() @@ -94,7 +94,7 @@ class TrialTestCase(unittest.TestCase): class BaseTestCase(TrialTestCase): # TODO(jaypipes): Can this be moved into the TrialTestCase class? 
"""Base test case class for all unit tests.""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Run before each test method to initialize test environment""" super(BaseTestCase, self).setUp() # TODO(termie): we could possibly keep a more global registry of @@ -106,7 +106,7 @@ class BaseTestCase(TrialTestCase): self._done_waiting = False self._timed_out = False - def tearDown(self):# pylint: disable=C0103 + def tearDown(self):# pylint: disable-msg=C0103 """Runs after each test method to finalize/tear down test environment""" super(BaseTestCase, self).tearDown() for x in self.injected: @@ -137,7 +137,7 @@ class BaseTestCase(TrialTestCase): if self._waiting: try: self.ioloop.remove_timeout(self._waiting) - except Exception: # pylint: disable=W0703 + except Exception: # pylint: disable-msg=W0703 # TODO(jaypipes): This produces a pylint warning. Should # we really be catching Exception and then passing here? pass diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 0395098092..993bfacc20 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -36,7 +36,7 @@ FLAGS = flags.FLAGS class NetworkTestCase(test.TrialTestCase): """Test cases for network code""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 super(NetworkTestCase, self).setUp() # NOTE(vish): if you change these flags, make sure to change the # flags in the corresponding section in nova-dhcpbridge @@ -60,7 +60,7 @@ class NetworkTestCase(test.TrialTestCase): vpn.NetworkData.create(self.projects[i].id) self.service = service.VlanNetworkService() - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 super(NetworkTestCase, self).tearDown() for project in self.projects: self.manager.delete_project(project) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index 5b956fccfc..dece4b5d52 100644 --- 
a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -56,7 +56,7 @@ os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets')) class ObjectStoreTestCase(test.BaseTestCase): """Test objectstore API directly.""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Setup users and projects.""" super(ObjectStoreTestCase, self).setUp() self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), @@ -78,7 +78,7 @@ class ObjectStoreTestCase(test.BaseTestCase): self.context = Context() - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 """Tear down users and projects.""" self.auth_manager.delete_project('proj1') self.auth_manager.delete_project('proj2') @@ -168,7 +168,7 @@ class ObjectStoreTestCase(test.BaseTestCase): class TestHTTPChannel(http.HTTPChannel): """Dummy site required for twisted.web""" - def checkPersistence(self, _, __): # pylint: disable=C0103 + def checkPersistence(self, _, __): # pylint: disable-msg=C0103 """Otherwise we end up with an unclean reactor.""" return False @@ -181,7 +181,7 @@ class TestSite(server.Site): class S3APITestCase(test.TrialTestCase): """Test objectstore through S3 API.""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 """Setup users, projects, and start a test server.""" super(S3APITestCase, self).setUp() @@ -198,7 +198,7 @@ class S3APITestCase(test.TrialTestCase): root = S3() self.site = TestSite(root) - # pylint: disable=E1101 + # pylint: disable-msg=E1101 self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1') # pylint: enable-msg=E1101 @@ -221,11 +221,11 @@ class S3APITestCase(test.TrialTestCase): self.conn.get_http_connection = get_http_connection - def _ensure_no_buckets(self, buckets): # pylint: disable=C0111 + def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111 self.assertEquals(len(buckets), 0, "Bucket list was not empty") return True - def 
_ensure_one_bucket(self, buckets, name): # pylint: disable=C0111 + def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111 self.assertEquals(len(buckets), 1, "Bucket list didn't have exactly one element in it") self.assertEquals(buckets[0].name, name, "Wrong name") @@ -296,7 +296,7 @@ class S3APITestCase(test.TrialTestCase): deferred.addCallback(self._ensure_no_buckets) return deferred - def tearDown(self): # pylint: disable=C0103 + def tearDown(self): # pylint: disable-msg=C0103 """Tear down auth and test server.""" self.auth_manager.delete_user('admin') self.auth_manager.delete_project('admin') diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index 764a97416f..e12a28fbc6 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -32,7 +32,7 @@ FLAGS = flags.FLAGS class RpcTestCase(test.BaseTestCase): """Test cases for rpc""" - def setUp(self): # pylint: disable=C0103 + def setUp(self): # pylint: disable-msg=C0103 super(RpcTestCase, self).setUp() self.conn = rpc.Connection.instance() self.receiver = TestReceiver() diff --git a/nova/wsgi.py b/nova/wsgi.py index a0a175dc74..fd87afe6e6 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -83,7 +83,7 @@ class Application(object): raise NotImplementedError("You must implement __call__") -class Middleware(Application): # pylint: disable=W0223 +class Middleware(Application): # pylint: disable-msg=W0223 """ Base WSGI middleware wrapper. These classes require an application to be initialized that will be called next. By default the middleware will @@ -91,7 +91,7 @@ class Middleware(Application): # pylint: disable=W0223 behavior. 
""" - def __init__(self, application): # pylint: disable=W0231 + def __init__(self, application): # pylint: disable-msg=W0231 self.application = application @webob.dec.wsgify diff --git a/tools/pip-requires b/tools/pip-requires index 28af7bcb96..13e8e5f451 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -1,5 +1,5 @@ pep8==0.5.0 -pylint==0.21.1 +pylint==0.19 IPy==0.70 M2Crypto==0.20.2 amqplib==0.6.1 From 4e5e72da2e3242026d757c8d5143e16f9d00cb6a Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 18 Aug 2010 08:56:33 -0700 Subject: [PATCH 100/101] Removed the 'controllers' directory under 'rackspace' due to full class name redundancy. --- nova/api/rackspace/__init__.py | 8 ++++---- nova/api/rackspace/{controllers => }/base.py | 0 nova/api/rackspace/controllers/__init__.py | 0 nova/api/rackspace/{controllers => }/flavors.py | 0 nova/api/rackspace/{controllers => }/images.py | 0 nova/api/rackspace/{controllers => }/servers.py | 2 +- nova/api/rackspace/{controllers => }/sharedipgroups.py | 0 7 files changed, 5 insertions(+), 5 deletions(-) rename nova/api/rackspace/{controllers => }/base.py (100%) delete mode 100644 nova/api/rackspace/controllers/__init__.py rename nova/api/rackspace/{controllers => }/flavors.py (100%) rename nova/api/rackspace/{controllers => }/images.py (100%) rename nova/api/rackspace/{controllers => }/servers.py (98%) rename nova/api/rackspace/{controllers => }/sharedipgroups.py (100%) diff --git a/nova/api/rackspace/__init__.py b/nova/api/rackspace/__init__.py index 662cbe4958..27e78f8011 100644 --- a/nova/api/rackspace/__init__.py +++ b/nova/api/rackspace/__init__.py @@ -29,10 +29,10 @@ import webob.exc from nova import flags from nova import wsgi -from nova.api.rackspace.controllers import flavors -from nova.api.rackspace.controllers import images -from nova.api.rackspace.controllers import servers -from nova.api.rackspace.controllers import sharedipgroups +from nova.api.rackspace import flavors +from nova.api.rackspace import images 
+from nova.api.rackspace import servers +from nova.api.rackspace import sharedipgroups from nova.auth import manager diff --git a/nova/api/rackspace/controllers/base.py b/nova/api/rackspace/base.py similarity index 100% rename from nova/api/rackspace/controllers/base.py rename to nova/api/rackspace/base.py diff --git a/nova/api/rackspace/controllers/__init__.py b/nova/api/rackspace/controllers/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/nova/api/rackspace/controllers/flavors.py b/nova/api/rackspace/flavors.py similarity index 100% rename from nova/api/rackspace/controllers/flavors.py rename to nova/api/rackspace/flavors.py diff --git a/nova/api/rackspace/controllers/images.py b/nova/api/rackspace/images.py similarity index 100% rename from nova/api/rackspace/controllers/images.py rename to nova/api/rackspace/images.py diff --git a/nova/api/rackspace/controllers/servers.py b/nova/api/rackspace/servers.py similarity index 98% rename from nova/api/rackspace/controllers/servers.py rename to nova/api/rackspace/servers.py index 1d0221ea85..25d1fe9c8a 100644 --- a/nova/api/rackspace/controllers/servers.py +++ b/nova/api/rackspace/servers.py @@ -17,7 +17,7 @@ from nova import rpc from nova.compute import model as compute -from nova.api.rackspace.controllers import base +from nova.api.rackspace import base class Controller(base.Controller): diff --git a/nova/api/rackspace/controllers/sharedipgroups.py b/nova/api/rackspace/sharedipgroups.py similarity index 100% rename from nova/api/rackspace/controllers/sharedipgroups.py rename to nova/api/rackspace/sharedipgroups.py From 7cd16b5754a38257d6b492bc29e6f99f2537f11a Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 18 Aug 2010 12:09:29 -0400 Subject: [PATCH 101/101] Missed one --- pylintrc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pylintrc b/pylintrc index 6c799c7ea7..943eeac364 100644 --- a/pylintrc +++ b/pylintrc @@ -1,9 +1,9 @@ [Messages Control] 
-disable=C0103 +disable-msg=C0103 # TODOs in code comments are fine... -disable=W0511 +disable-msg=W0511 # *args and **kwargs are fine -disable=W0142 +disable-msg=W0142 [Basic] # Variables can be 1 to 31 characters long, with