diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 036c6bff7d..0c73539f1c 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -76,7 +76,7 @@ CONF.register_opts(ec2_opts) CONF.import_opt('use_forwarded_for', 'nova.api.auth') -## Fault Wrapper around all EC2 requests ## +# Fault Wrapper around all EC2 requests class FaultWrapper(wsgi.Middleware): """Calls the middleware stack, captures any exceptions into faults.""" diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index aea4c7aafe..c3014aabbf 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -411,7 +411,7 @@ class CloudController(object): if key_name is not None: key_pairs = [x for x in key_pairs if x['name'] in key_name] - #If looking for non existent key pair + # If looking for non existent key pair if key_name is not None and not key_pairs: msg = _('Could not find key pair(s): %s') % ','.join(key_name) raise exception.KeypairNotFound(message=msg) diff --git a/nova/api/openstack/compute/contrib/server_diagnostics.py b/nova/api/openstack/compute/contrib/server_diagnostics.py index 9da8e1e753..215f6f4fac 100644 --- a/nova/api/openstack/compute/contrib/server_diagnostics.py +++ b/nova/api/openstack/compute/contrib/server_diagnostics.py @@ -65,7 +65,7 @@ class Server_diagnostics(extensions.ExtensionDescriptor): def get_resources(self): parent_def = {'member_name': 'server', 'collection_name': 'servers'} - #NOTE(bcwaldon): This should be prefixed with 'os-' + # NOTE(bcwaldon): This should be prefixed with 'os-' ext = extensions.ResourceExtension('diagnostics', ServerDiagnosticsController(), parent=parent_def) diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index 505d651b8b..7be8a97022 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -454,7 +454,7 @@ class ServersController(wsgi.Controller): # Replace with an extension 
point when the os-networks # extension is ported. Currently reworked # to take into account is_neutron - #if (self.ext_mgr.is_loaded('os-networks') + # if (self.ext_mgr.is_loaded('os-networks') # or utils.is_neutron()): # requested_networks = server_dict.get('networks') diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index c63cae40ea..5077b188b3 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -692,9 +692,9 @@ class Controller(wsgi.Controller): "(%s)") % network_uuid raise exc.HTTPBadRequest(explanation=msg) - #fixed IP address is optional - #if the fixed IP address is not provided then - #it will use one of the available IP address from the network + # fixed IP address is optional + # if the fixed IP address is not provided then + # it will use one of the available IP address from the network address = network.get('fixed_ip', None) if address is not None and not utils.is_valid_ip_address( address): diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 531213e158..9dbf3bc357 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -373,7 +373,7 @@ class XMLDictSerializer(DictSerializer): self._add_xmlns(node, has_atom) return node.toxml('UTF-8') - #NOTE (ameade): the has_atom should be removed after all of the + # NOTE (ameade): the has_atom should be removed after all of the # xml serializers and view builders have been updated to the current # spec that required all responses include the xmlns:atom, the has_atom # flag is to prevent current tests from breaking @@ -393,7 +393,7 @@ class XMLDictSerializer(DictSerializer): if xmlns: result.setAttribute('xmlns', xmlns) - #TODO(bcwaldon): accomplish this without a type-check + # TODO(bcwaldon): accomplish this without a type-check if isinstance(data, list): collections = metadata.get('list_collections', {}) if nodename in collections: @@ -412,7 +412,7 @@ class 
XMLDictSerializer(DictSerializer): for item in data: node = self._to_xml_node(doc, metadata, singular, item) result.appendChild(node) - #TODO(bcwaldon): accomplish this without a type-check + # TODO(bcwaldon): accomplish this without a type-check elif isinstance(data, dict): collections = metadata.get('dict_collections', {}) if nodename in collections: @@ -937,7 +937,7 @@ class Resource(wsgi.Application): try: contents = {} if self._should_have_body(request): - #allow empty body with PUT and POST + # allow empty body with PUT and POST if request.content_length == 0: contents = {'body': None} else: diff --git a/nova/api/openstack/xmlutil.py b/nova/api/openstack/xmlutil.py index b401d83525..679b873a16 100644 --- a/nova/api/openstack/xmlutil.py +++ b/nova/api/openstack/xmlutil.py @@ -990,7 +990,7 @@ def safe_minidom_parse_string(xml_string): return minidom.parseString(xml_string, parser=ProtectedExpatParser()) except (sax.SAXParseException, ValueError, expat.ExpatError, LookupError) as e: - #NOTE(Vijaya Erukala): XML input such as + # NOTE(Vijaya Erukala): XML input such as # # raises LookupError: unknown encoding: TF-8 raise exception.MalformedRequestBody(reason=str(e)) diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py index 1fab96b3df..aa5c42e6aa 100644 --- a/nova/api/sizelimit.py +++ b/nova/api/sizelimit.py @@ -24,7 +24,7 @@ from nova.i18n import _ from nova import wsgi -#default request size is 112k +# default request size is 112k max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size', default=114688, help='The maximum body size ' diff --git a/nova/cells/state.py b/nova/cells/state.py index aa2257d397..66a7fd14a9 100644 --- a/nova/cells/state.py +++ b/nova/cells/state.py @@ -55,7 +55,6 @@ CONF = cfg.CONF CONF.import_opt('name', 'nova.cells.opts', group='cells') CONF.import_opt('reserve_percent', 'nova.cells.opts', group='cells') CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells') -#CONF.import_opt('capabilities', 
'nova.cells.opts', group='cells') CONF.register_opts(cell_state_manager_opts, group='cells') diff --git a/nova/cmd/baremetal_deploy_helper.py b/nova/cmd/baremetal_deploy_helper.py index 348561af99..b4f546d1d9 100644 --- a/nova/cmd/baremetal_deploy_helper.py +++ b/nova/cmd/baremetal_deploy_helper.py @@ -134,7 +134,7 @@ def mkswap(dev, label='swap1'): def mkfs_ephemeral(dev, label="ephemeral0"): - #TODO(jogo) support non-default mkfs options as well + # TODO(jogo) support non-default mkfs options as well disk.mkfs("default", label, dev) diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py index 6cd599f3b0..125439c452 100644 --- a/nova/cmd/manage.py +++ b/nova/cmd/manage.py @@ -614,10 +614,10 @@ class NetworkCommands(object): admin_context = context.get_admin_context() network = db.network_get_by_cidr(admin_context, fixed_range) net = {} - #User can choose the following actions each for project and host. - #1) Associate (set not None value given by project/host parameter) - #2) Disassociate (set None by disassociate parameter) - #3) Keep unchanged (project/host key is not added to 'net') + # User can choose the following actions each for project and host. + # 1) Associate (set not None value given by project/host parameter) + # 2) Disassociate (set None by disassociate parameter) + # 3) Keep unchanged (project/host key is not added to 'net') if dis_project: net['project_id'] = None if dis_host: diff --git a/nova/compute/api.py b/nova/compute/api.py index 80ef8f350c..7cebe2c7e1 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1224,7 +1224,7 @@ class API(base.Base): security_groups) return instance - #NOTE(bcwaldon): No policy check since this is only used by scheduler and + # NOTE(bcwaldon): No policy check since this is only used by scheduler and # the compute api. That should probably be cleaned up, though. 
def create_db_entry_for_new_instance(self, context, instance_type, image, instance, security_group, block_device_mapping, num_instances, @@ -1833,7 +1833,7 @@ class API(base.Base): parameter. """ - #TODO(bcwaldon): determine the best argument for target here + # TODO(bcwaldon): determine the best argument for target here target = { 'project_id': context.project_id, 'user_id': context.user_id, @@ -3475,7 +3475,7 @@ class AggregateAPI(base.Base): aggregate.add_host(context, host_name) self._update_az_cache_for_host(context, host_name, aggregate.metadata) - #NOTE(jogo): Send message to host to support resource pools + # NOTE(jogo): Send message to host to support resource pools self.compute_rpcapi.add_aggregate_host(context, aggregate=aggregate, host_param=host_name, host=host_name) aggregate_payload.update({'name': aggregate['name']}) @@ -3799,7 +3799,7 @@ class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase): instance_uuid = instance['uuid'] - #check if the security group is associated with the server + # check if the security group is associated with the server if self.is_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupExistsForInstance( security_group_id=security_group['id'], @@ -3822,7 +3822,7 @@ class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase): instance_uuid = instance['uuid'] - #check if the security group is associated with the server + # check if the security group is associated with the server if not self.is_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupNotExistsForInstance( security_group_id=security_group['id'], diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 98f928111b..b5ca1c057d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1088,7 +1088,7 @@ class ComputeManager(manager.Manager): Currently this is just set in the flags for each compute host. 
""" - #TODO(mdragon): perhaps make this variable by console_type? + # TODO(mdragon): perhaps make this variable by console_type? return '%s.%s' % (CONF.console_topic, CONF.console_host) def get_console_pool_info(self, context, console_type): @@ -2583,7 +2583,7 @@ class ComputeManager(manager.Manager): # This instance.exists message should contain the original # image_ref, not the new one. Since the DB has been updated # to point to the new one... we have to override it. - #TODO(jaypipes): Move generate_image_url() into the nova.image.api + # TODO(jaypipes): Move generate_image_url() into the nova.image.api orig_image_ref_url = glance.generate_image_url(orig_image_ref) extra_usage_info = {'image_ref_url': orig_image_ref_url} self.conductor_api.notify_usage_exists(context, @@ -5400,9 +5400,9 @@ class ComputeManager(manager.Manager): 'num_vm_instances': num_vm_instances}) for db_instance in db_instances: - #NOTE(melwitt): This must be synchronized as we query state from - # two separate sources, the driver and the database. - # They are set (in stop_instance) and read, in sync. + # NOTE(melwitt): This must be synchronized as we query state from + # two separate sources, the driver and the database. + # They are set (in stop_instance) and read, in sync. 
@utils.synchronized(db_instance.uuid) def query_driver_power_state_and_sync(): self._query_driver_power_state_and_sync(context, db_instance) diff --git a/nova/compute/utils.py b/nova/compute/utils.py index 8836f5039d..1c593738c6 100644 --- a/nova/compute/utils.py +++ b/nova/compute/utils.py @@ -42,7 +42,7 @@ LOG = log.getLogger(__name__) def exception_to_dict(fault): """Converts exceptions to a dict for use in notifications.""" - #TODO(johngarbutt) move to nova/exception.py to share with wrap_exception + # TODO(johngarbutt) move to nova/exception.py to share with wrap_exception code = 500 if hasattr(fault, "kwargs"): diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py index ba102f4e3d..d0ab4f981b 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -571,7 +571,7 @@ class ComputeTaskManager(base.Base): exception.InstanceNotRunning, exception.MigrationPreCheckError) as ex: with excutils.save_and_reraise_exception(): - #TODO(johngarbutt) - eventually need instance actions here + # TODO(johngarbutt) - eventually need instance actions here request_spec = {'instance_properties': { 'uuid': instance['uuid'], }, } diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py index f92f94e22f..7e016fd858 100644 --- a/nova/conductor/tasks/live_migrate.py +++ b/nova/conductor/tasks/live_migrate.py @@ -60,8 +60,8 @@ class LiveMigrationTask(object): else: self._check_requested_destination() - #TODO(johngarbutt) need to move complexity out of compute manager - #TODO(johngarbutt) disk_over_commit? + # TODO(johngarbutt) need to move complexity out of compute manager + # TODO(johngarbutt) disk_over_commit? 
return self.compute_rpcapi.live_migration(self.context, host=self.source, instance=self.instance, @@ -70,7 +70,7 @@ class LiveMigrationTask(object): migrate_data=self.migrate_data) def rollback(self): - #TODO(johngarbutt) need to implement the clean up operation + # TODO(johngarbutt) need to implement the clean up operation # but this will make sense only once we pull in the compute # calls, since this class currently makes no state changes, # except to call the compute method, that has no matching @@ -141,7 +141,7 @@ class LiveMigrationTask(object): destination, self.block_migration, self.disk_over_commit) def _find_destination(self): - #TODO(johngarbutt) this retry loop should be shared + # TODO(johngarbutt) this retry loop should be shared attempted_hosts = [self.source] image = None if self.instance.image_ref: @@ -187,5 +187,5 @@ def execute(context, instance, destination, destination, block_migration, disk_over_commit) - #TODO(johngarbutt) create a superclass that contains a safe_execute call + # TODO(johngarbutt) create a superclass that contains a safe_execute call return task.execute() diff --git a/nova/console/api.py b/nova/console/api.py index f3cb9d3b42..5b1dbb7050 100644 --- a/nova/console/api.py +++ b/nova/console/api.py @@ -46,11 +46,11 @@ class API(base.Base): rpcapi.remove_console(context, console['id']) def create_console(self, context, instance_uuid): - #NOTE(mdragon): If we wanted to return this the console info - # here, as we would need to do a call. - # They can just do an index later to fetch - # console info. I am not sure which is better - # here. + # NOTE(mdragon): If we wanted to return this the console info + # here, as we would need to do a call. + # They can just do an index later to fetch + # console info. I am not sure which is better + # here. 
instance = self._get_instance(context, instance_uuid) topic = self._get_console_topic(context, instance['host']) server = None diff --git a/nova/console/manager.py b/nova/console/manager.py index 2e66320a94..95c07352f7 100644 --- a/nova/console/manager.py +++ b/nova/console/manager.py @@ -110,9 +110,9 @@ class ConsoleProxyManager(manager.Manager): self.host, console_type) except exception.NotFound: - #NOTE(mdragon): Right now, the only place this info exists is the - # compute worker's flagfile, at least for - # xenserver. Thus we ned to ask. + # NOTE(mdragon): Right now, the only place this info exists is the + # compute worker's flagfile, at least for + # xenserver. Thus we need to ask. if CONF.stub_compute: pool_info = {'address': '127.0.0.1', 'username': 'test', diff --git a/nova/console/xvp.py b/nova/console/xvp.py index 85e3f43235..48d860def8 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -69,7 +69,7 @@ class XVPConsoleProxy(object): def get_port(self, context): """Get available port for consoles that need one.""" - #TODO(mdragon): implement port selection for non multiplex ports, + # TODO(mdragon): implement port selection for non multiplex ports, # we are not using that, but someone else may want # it. return CONF.console_xvp_multiplex_port @@ -131,7 +131,7 @@ class XVPConsoleProxy(object): try: os.kill(pid, signal.SIGTERM) except OSError: - #if it's already not running, no problem. + # if it's already not running, no problem. 
pass def _xvp_start(self): @@ -196,7 +196,7 @@ class XVPConsoleProxy(object): if is_pool_password: maxlen = 16 flag = '-x' - #xvp will blow up on passwords that are too long (mdragon) + # xvp will blow up on passwords that are too long (mdragon) password = password[:maxlen] out, err = utils.execute('xvp', flag, process_input=password) if err: diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f9d62a632d..af8ebd2e81 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1759,7 +1759,7 @@ def _build_instance_get(context, session=None, # Already always joined above continue query = query.options(joinedload(column)) - #NOTE(alaski) Stop lazy loading of columns not needed. + # NOTE(alaski) Stop lazy loading of columns not needed. for col in ['metadata', 'system_metadata']: if col not in columns_to_join: query = query.options(noload(col)) @@ -2612,8 +2612,8 @@ def network_get_all_by_uuids(context, network_uuids, project_only): if not result: raise exception.NoNetworksFound() - #check if the result contains all the networks - #we are looking for + # check if the result contains all the networks + # we are looking for for network_uuid in network_uuids: for network in result: if network['uuid'] == network_uuid: @@ -5181,7 +5181,7 @@ def aggregate_delete(context, aggregate_id): if count == 0: raise exception.AggregateNotFound(aggregate_id=aggregate_id) - #Delete Metadata + # Delete Metadata model_query(context, models.AggregateMetadata, session=session).\ filter_by(aggregate_id=aggregate_id).\ @@ -5578,7 +5578,7 @@ def task_log_end_task(context, task_name, period_beginning, period_ending, period_ending, host, session=session).\ update(values) if rows == 0: - #It's not running! + # It's not running! 
raise exception.TaskNotRunning(task_name=task_name, host=host) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 739c8aaf28..ede89429cc 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -537,9 +537,10 @@ class BlockDeviceMapping(BASE, NovaBase): Index('block_device_mapping_instance_uuid_volume_id_idx', 'instance_uuid', 'volume_id'), Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'), - #TODO(sshturm) Should be dropped. `virtual_name` was dropped - #in 186 migration, - #Duplicates `block_device_mapping_instance_uuid_device_name_idx` index. + # TODO(sshturm) Should be dropped. `virtual_name` was dropped + # in 186 migration, + # Duplicates `block_device_mapping_instance_uuid_device_name_idx` + # index. Index("block_device_mapping_instance_uuid_virtual_name" "_device_name_idx", 'instance_uuid', 'device_name'), ) @@ -569,7 +570,7 @@ class BlockDeviceMapping(BASE, NovaBase): # With EC2 API, # default True for ami specified device. # default False for created with other timing. 
- #TODO(sshturm) add default in db + # TODO(sshturm) add default in db delete_on_termination = Column(Boolean, default=False) snapshot_id = Column(String(36)) @@ -735,7 +736,7 @@ class Migration(BASE, NovaBase): old_instance_type_id = Column(Integer()) new_instance_type_id = Column(Integer()) instance_uuid = Column(String(36), ForeignKey('instances.uuid')) - #TODO(_cerberus_): enum + # TODO(_cerberus_): enum status = Column(String(255)) instance = relationship("Instance", foreign_keys=instance_uuid, @@ -838,12 +839,12 @@ class FixedIp(BASE, NovaBase): instance_uuid = Column(String(36), ForeignKey('instances.uuid')) # associated means that a fixed_ip has its instance_id column set # allocated means that a fixed_ip has its virtual_interface_id column set - #TODO(sshturm) add default in db + # TODO(sshturm) add default in db allocated = Column(Boolean, default=False) # leased means dhcp bridge has leased the ip - #TODO(sshturm) add default in db + # TODO(sshturm) add default in db leased = Column(Boolean, default=False) - #TODO(sshturm) add default in db + # TODO(sshturm) add default in db reserved = Column(Boolean, default=False) host = Column(String(255)) network = relationship(Network, @@ -879,7 +880,7 @@ class FloatingIp(BASE, NovaBase): project_id = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) auto_assigned = Column(Boolean, default=False) - #TODO(sshturm) add default in db + # TODO(sshturm) add default in db pool = Column(String(255)) interface = Column(String(255)) fixed_ip = relationship(FixedIp, diff --git a/nova/image/api.py b/nova/image/api.py index c5f983a6f4..3ececf2fc0 100644 --- a/nova/image/api.py +++ b/nova/image/api.py @@ -46,11 +46,11 @@ class API(object): :param context: The `nova.context.Context` object for the request """ - #TODO(jaypipes): Refactor glance.get_remote_image_service and - # glance.get_default_image_service into a single - # method that takes a context and actually respects - # it, returning a real 
session object that keeps - # the context alive... + # TODO(jaypipes): Refactor glance.get_remote_image_service and + # glance.get_default_image_service into a single + # method that takes a context and actually respects + # it, returning a real session object that keeps + # the context alive... return glance.get_default_image_service() def get_all(self, context, **kwargs): diff --git a/nova/image/download/file.py b/nova/image/download/file.py index 93ec551c65..a416835c87 100644 --- a/nova/image/download/file.py +++ b/nova/image/download/file.py @@ -70,7 +70,7 @@ class FileTransfer(xfer_base.TransferBase): desc_required_keys = ['id', 'mountpoint'] - #NOTE(jbresnah) because the group under which these options are added is + # NOTE(jbresnah) because the group under which these options are added is # dyncamically determined these options need to stay out of global space # or they will confuse generate_sample.sh filesystem_opts = [ @@ -143,7 +143,7 @@ class FileTransfer(xfer_base.TransferBase): def download(self, context, url_parts, dst_file, metadata, **kwargs): self.filesystems = self._get_options() if not self.filesystems: - #NOTE(jbresnah) when nothing is configured assume legacy behavior + # NOTE(jbresnah) when nothing is configured assume legacy behavior nova_mountpoint = '/' glance_mountpoint = '/' else: diff --git a/nova/image/glance.py b/nova/image/glance.py index 7d20ad277c..b28629a980 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -148,7 +148,7 @@ def _create_glance_client(context, host, port, use_ssl, version=1): params['token'] = context.auth_token params['identity_headers'] = generate_identity_headers(context) if utils.is_valid_ipv6(host): - #if so, it is ipv6 address, need to wrap it with '[]' + # if so, it is ipv6 address, need to wrap it with '[]' host = '[%s]' % host endpoint = '%s://%s:%s' % (scheme, host, port) return glanceclient.Client(str(version), endpoint, **params) @@ -250,7 +250,7 @@ class GlanceImageService(object): def 
__init__(self, client=None): self._client = client or GlanceClientWrapper() - #NOTE(jbresnah) build the table of download handlers at the beginning + # NOTE(jbresnah) build the table of download handlers at the beginning # so that operators can catch errors at load time rather than whenever # a user attempts to use a module. Note this cannot be done in glance # space when this python module is loaded because the download module @@ -366,7 +366,7 @@ class GlanceImageService(object): """Modify the given image with the new data.""" image_meta = _translate_to_glance(image_meta) image_meta['purge_props'] = purge_props - #NOTE(bcwaldon): id is not an editable field, but it is likely to be + # NOTE(bcwaldon): id is not an editable field, but it is likely to be # passed in by calling code. Let's be nice and ignore it. image_meta.pop('id', None) if data: @@ -529,7 +529,7 @@ def _convert_to_string(metadata): def _extract_attributes(image): - #NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform + # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform # a get(), resulting in a useless request back to glance. 
This list is # therefore sorted, with dependent attributes as the end # 'deleted_at' depends on 'deleted' @@ -552,7 +552,7 @@ def _extract_attributes(image): # image may not have 'name' attr elif attr == 'name': output[attr] = getattr(image, attr, None) - #NOTE(liusheng): queued image may not have these attributes and 'name' + # NOTE(liusheng): queued image may not have these attributes and 'name' elif queued and attr in queued_exclude_attrs: output[attr] = getattr(image, attr, None) else: @@ -624,7 +624,7 @@ def get_remote_image_service(context, image_href): :returns: a tuple of the form (image_service, image_id) """ - #NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a + # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a # standalone image ID if '/' not in str(image_href): image_service = get_default_image_service() diff --git a/nova/image/s3.py b/nova/image/s3.py index 05a532baee..fa7278e257 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -160,7 +160,7 @@ class S3ImageService(object): return self._translate_uuid_to_id(context, image) def detail(self, context, **kwargs): - #NOTE(bcwaldon): sort asc to make sure we assign lower ids + # NOTE(bcwaldon): sort asc to make sure we assign lower ids # to older images kwargs.setdefault('sort_dir', 'asc') images = self.service.detail(context, **kwargs) @@ -264,7 +264,7 @@ class S3ImageService(object): 'properties': properties}) metadata['properties']['image_state'] = 'pending' - #TODO(bcwaldon): right now, this removes user-defined ids. + # TODO(bcwaldon): right now, this removes user-defined ids. # We need to re-enable this. 
metadata.pop('id', None) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 84885be6d5..e88cf25f67 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -980,7 +980,7 @@ def get_dhcp_opts(context, network_ref): vifs = objects.VirtualInterfaceList.get_by_instance_uuid( context, instance_uuid) if vifs: - #offer a default gateway to the first virtual interface + # offer a default gateway to the first virtual interface default_gw_vif[instance_uuid] = vifs[0].id for fixedip in fixedips: diff --git a/nova/network/manager.py b/nova/network/manager.py index ac83165c2b..4e4ae7fc70 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -1372,7 +1372,7 @@ class NetworkManager(manager.Manager): for vif in vifs: network = objects.Network.get_by_id(context, vif.network_id) if not network.multi_host: - #NOTE (tr3buchet): if using multi_host, host is instance[host] + # NOTE (tr3buchet): if using multi_host, host is instance[host] host = network['host'] if self.host == host or host is None: # at this point i am the correct host, or host doesn't diff --git a/nova/network/nova_ipam_lib.py b/nova/network/nova_ipam_lib.py index 58afe93d23..295af08c4c 100644 --- a/nova/network/nova_ipam_lib.py +++ b/nova/network/nova_ipam_lib.py @@ -50,9 +50,9 @@ class NeutronNovaIPAMLib(object): 'version': 4, 'dns1': n.dns1, 'dns2': n.dns2} - #TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4. - # this is probably bad as there is no way to add v6 - # dns to nova + # TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4. 
+ # this is probably bad as there is no way to add v6 + # dns to nova subnet_v6 = { 'network_id': n.uuid, 'cidr': n.cidr_v6, diff --git a/nova/objects/fields.py b/nova/objects/fields.py index fad6011c5e..27eb12fd96 100644 --- a/nova/objects/fields.py +++ b/nova/objects/fields.py @@ -405,10 +405,10 @@ class Dict(CompoundFieldType): raise ValueError(_('A dict is required here')) for key, element in value.items(): if not isinstance(key, six.string_types): - #NOTE(guohliu) In order to keep compatibility with python3 - #we need to use six.string_types rather than basestring here, - #since six.string_types is a tuple, so we need to pass the - #real type in. + # NOTE(guohliu) In order to keep compatibility with python3 + # we need to use six.string_types rather than basestring here, + # since six.string_types is a tuple, so we need to pass the + # real type in. raise KeyTypeError(six.string_types[0], key) value[key] = self._element_type.coerce( obj, '%s["%s"]' % (attr, key), element) diff --git a/nova/safe_utils.py b/nova/safe_utils.py index a6d2734733..ce9499bf80 100644 --- a/nova/safe_utils.py +++ b/nova/safe_utils.py @@ -30,7 +30,7 @@ def getcallargs(function, *args, **kwargs): keyed_args.update(kwargs) - #NOTE(alaski) the implicit 'self' or 'cls' argument shows up in + # NOTE(alaski) the implicit 'self' or 'cls' argument shows up in # argnames but not in args or kwargs. Uses 'in' rather than '==' because # some tests use 'self2'. 
if 'self' in argnames[0] or 'cls' == argnames[0]: diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py index 9cea311ebf..2708b843e2 100644 --- a/nova/scheduler/host_manager.py +++ b/nova/scheduler/host_manager.py @@ -149,9 +149,9 @@ class HostState(object): self.service = ReadOnlyDict(service) def _update_metrics_from_compute_node(self, compute): - #NOTE(llu): The 'or []' is to avoid json decode failure of None - # returned from compute.get, because DB schema allows - # NULL in the metrics column + # NOTE(llu): The 'or []' is to avoid json decode failure of None + # returned from compute.get, because DB schema allows + # NULL in the metrics column metrics = compute.get('metrics', []) or [] if metrics: metrics = jsonutils.loads(metrics) @@ -189,7 +189,7 @@ class HostState(object): self.disk_mb_used = compute['local_gb_used'] * 1024 - #NOTE(jogo) free_ram_mb can be negative + # NOTE(jogo) free_ram_mb can be negative self.free_ram_mb = compute['free_ram_mb'] self.total_usable_ram_mb = all_ram_mb self.total_usable_disk_gb = compute['local_gb'] diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py index bb48b68312..6a8c51dc1d 100644 --- a/nova/tests/api/ec2/test_cinder_cloud.py +++ b/nova/tests/api/ec2/test_cinder_cloud.py @@ -852,7 +852,7 @@ class CinderCloudTestCase(test.TestCase): self.assertEqual(vol['status'], "in-use") self.assertEqual(vol['attach_status'], "attached") - #Here we puke... + # Here we puke... 
self.cloud.terminate_instances(self.context, [ec2_instance_id]) admin_ctxt = context.get_admin_context(read_deleted="no") @@ -990,7 +990,7 @@ class CinderCloudTestCase(test.TestCase): self._assert_volume_attached(vol, instance_uuid, mountpoint) - #Just make sure we found them + # Just make sure we found them self.assertTrue(vol1_id) self.assertTrue(vol2_id) diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py index 4f1a11481d..841def64e9 100644 --- a/nova/tests/api/ec2/test_ec2_validate.py +++ b/nova/tests/api/ec2/test_ec2_validate.py @@ -106,7 +106,7 @@ class EC2ValidateTestCase(test.TestCase): super(EC2ValidateTestCase, self).tearDown() fake.FakeImageService_reset() - #EC2_API tests (InvalidInstanceID.Malformed) + # EC2_API tests (InvalidInstanceID.Malformed) def test_console_output(self): for ec2_id, e in self.ec2_id_exception_map: self.assertRaises(e, @@ -215,7 +215,7 @@ class EC2TimestampValidationTestCase(test.TestCase): def test_validate_ec2_timestamp_advanced_time(self): - #EC2 request with Timestamp in advanced time + # EC2 request with Timestamp in advanced time timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250) params = {'Timestamp': timeutils.strtime(timestamp, "%Y-%m-%dT%H:%M:%SZ")} @@ -252,14 +252,14 @@ class EC2TimestampValidationTestCase(test.TestCase): def test_validate_Expires_timestamp_invalid_format(self): - #EC2 request with invalid Expires + # EC2 request with invalid Expires params = {'Expires': '2011-04-22T11:29:49'} expired = ec2utils.is_ec2_timestamp_expired(params) self.assertTrue(expired) def test_validate_ec2_req_timestamp_Expires(self): - #EC2 request with both Timestamp and Expires + # EC2 request with both Timestamp and Expires params = {'Timestamp': '2011-04-22T11:29:49Z', 'Expires': timeutils.isotime()} self.assertRaises(exception.InvalidRequest, diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py 
b/nova/tests/api/openstack/compute/contrib/test_aggregates.py index 9b84b9dc8b..553ba0b727 100644 --- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py +++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py @@ -409,7 +409,7 @@ class AggregateTestCase(test.NoDBTestCase): raise KeyError self.stubs.Set(self.controller.api, "add_host_to_aggregate", stub_add_host_to_aggregate) - #NOTE(mtreinish) The check for a KeyError here is to ensure that + # NOTE(mtreinish) The check for a KeyError here is to ensure that # if add_host_to_aggregate() raises a KeyError it propagates. At # one point the api code would mask the error as a HTTPBadRequest. # This test is to ensure that this doesn't occur again. diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py index 1136facec5..9494539a4b 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py @@ -38,7 +38,7 @@ FAKE_FLAVORS = { } -#TOD(jogo) dedup these across nova.api.openstack.contrib.test_flavor* +# TODO(jogo) dedup these across nova.api.openstack.contrib.test_flavor* def fake_flavor_get_by_flavor_id(flavorid, ctxt=None): return FAKE_FLAVORS['flavor %s' % flavorid] diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py b/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py index 970cb8ff16..0ff6ec3105 100644 --- a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py +++ b/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py @@ -40,11 +40,11 @@ begin3 = end2 = datetime.datetime(2012, 7, 6, 6, 0, 0) end3 = datetime.datetime(2012, 7, 7, 6, 0, 0) -#test data +# test data TEST_LOGS1 = [ - #all services done, no errors. + # all services done, no errors. 
dict(host="plonk", period_beginning=begin1, period_ending=end1, state="DONE", errors=0, task_items=23, message="test1"), dict(host="baz", period_beginning=begin1, period_ending=end1, @@ -57,7 +57,7 @@ TEST_LOGS1 = [ TEST_LOGS2 = [ - #some still running... + # some still running... dict(host="plonk", period_beginning=begin2, period_ending=end2, state="DONE", errors=0, task_items=23, message="test5"), dict(host="baz", period_beginning=begin2, period_ending=end2, @@ -70,7 +70,7 @@ TEST_LOGS2 = [ TEST_LOGS3 = [ - #some errors.. + # some errors.. dict(host="plonk", period_beginning=begin3, period_ending=end3, state="DONE", errors=0, task_items=23, message="test9"), dict(host="baz", period_beginning=begin3, period_ending=end3, diff --git a/nova/tests/api/openstack/compute/extensions/foxinsocks.py b/nova/tests/api/openstack/compute/extensions/foxinsocks.py index 5785f1037a..7d1e273ea7 100644 --- a/nova/tests/api/openstack/compute/extensions/foxinsocks.py +++ b/nova/tests/api/openstack/compute/extensions/foxinsocks.py @@ -45,7 +45,7 @@ class FoxInSocksServerControllerExtension(wsgi.Controller): class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): - #NOTE: This only handles JSON responses. + # NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing') @@ -53,7 +53,7 @@ class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): - #NOTE: This only handles JSON responses. + # NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['big_bands'] = 'Pig Bands!' 
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py index 5f9f1a883d..52bdaddaf9 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py @@ -484,9 +484,6 @@ class FlavorDisabledTest(test.TestCase): super(FlavorDisabledTest, self).setUp() fakes.stub_out_nw_api(self.stubs) - #def fake_flavor_get_all(*args, **kwargs): - # return FAKE_FLAVORS - # self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list", fake_get_all_flavors_sorted_list) self.stubs.Set(nova.compute.flavors, diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py index 1ab5e86f8a..ce1f343b31 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py @@ -515,19 +515,19 @@ class ServerMetaDataTest(BaseTest): req.method = 'PUT' req.headers["content-type"] = "application/json" - #test for long key + # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update_all, req, self.uuid, data) - #test for long value + # test for long value data = {"metadata": {"key": "v" * 260}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update_all, req, self.uuid, data) - #test for empty key. + # test for empty key. 
data = {"metadata": {"": "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPBadRequest, diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index f00e4b0bf4..a938038a11 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -2880,7 +2880,7 @@ class ServersViewBuilderTest(test.TestCase): self.assertNotIn('fault', output['server']) def test_build_server_detail_active_status(self): - #set the power state of the instance to running + # set the power state of the instance to running self.instance['vm_state'] = vm_states.ACTIVE self.instance['progress'] = 100 image_bookmark = "http://localhost:9292/images/5" diff --git a/nova/tests/api/openstack/compute/test_api.py b/nova/tests/api/openstack/compute/test_api.py index fc83c4fd19..15f9fe499a 100644 --- a/nova/tests/api/openstack/compute/test_api.py +++ b/nova/tests/api/openstack/compute/test_api.py @@ -79,7 +79,7 @@ class APITest(test.NoDBTestCase): def raise_webob_exc(req): raise webob.exc.HTTPNotFound(explanation='Raised a webob.exc') - #api.application = raise_webob_exc + # api.application = raise_webob_exc api = self._wsgi_app(raise_webob_exc) resp = webob.Request.blank('/').get_response(api) self.assertEqual(resp.status_int, 404, resp.body) @@ -90,7 +90,7 @@ class APITest(test.NoDBTestCase): exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc') return wsgi.Fault(exc) - #api.application = raise_api_fault + # api.application = raise_api_fault api = self._wsgi_app(raise_api_fault) resp = webob.Request.blank('/').get_response(api) self.assertIn('itemNotFound', resp.body) @@ -101,7 +101,7 @@ class APITest(test.NoDBTestCase): def fail(req): raise Exception("Threw an exception") - #api.application = fail + # api.application = fail api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) 
self.assertIn('{"computeFault', resp.body) @@ -112,7 +112,7 @@ class APITest(test.NoDBTestCase): def fail(req): raise Exception("Threw an exception") - #api.application = fail + # api.application = fail api = self._wsgi_app(fail) resp = webob.Request.blank('/.xml').get_response(api) self.assertIn(' terminate + # check failed to schedule --> terminate params = {'vm_state': vm_states.ERROR} instance = self._create_fake_instance_obj(params=params) self.compute.terminate_instance(self.context, instance, [], []) @@ -4725,7 +4725,7 @@ class ComputeTestCase(BaseTestCase): self.context.elevated(), instance.uuid, 'pre-migrating') - #verify + # verify self.assertRaises(test.TestingException, self.compute.resize_instance, self.context, instance=instance, migration=migration, image={}, @@ -6175,7 +6175,7 @@ class ComputeTestCase(BaseTestCase): instance.update(filters) old_instances.append(fake_instance.fake_db_instance(**instance)) - #not expired + # not expired instances = list(old_instances) # copy the contents of old_instances new_instance = { 'uuid': str(uuid.uuid4()), @@ -10640,7 +10640,7 @@ class EvacuateHostTestCase(BaseTestCase): """Confirm evacuate scenario updates vm_state to stopped if instance is in stopped state """ - #Initialize the VM to stopped state + # Initialize the VM to stopped state db.instance_update(self.context, self.inst_ref['uuid'], {"vm_state": vm_states.STOPPED}) self.inst_ref['vm_state'] = vm_states.STOPPED @@ -10650,7 +10650,7 @@ class EvacuateHostTestCase(BaseTestCase): self._rebuild() - #Check the vm state is reset to stopped + # Check the vm state is reset to stopped instance = db.instance_get(self.context, self.inst_ref['id']) self.assertEqual(instance['vm_state'], vm_states.STOPPED) diff --git a/nova/tests/console/test_console.py b/nova/tests/console/test_console.py index b107cdf4fe..bd3dbeb6b6 100644 --- a/nova/tests/console/test_console.py +++ b/nova/tests/console/test_console.py @@ -47,8 +47,6 @@ class ConsoleTestCase(test.TestCase): def 
_create_instance(self): """Create a test instance.""" inst = {} - #inst['host'] = self.host - #inst['name'] = 'instance-1234' inst['image_id'] = 1 inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user_id diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index adaf68fc1f..a0969ec91a 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -332,7 +332,7 @@ class AggregateDBApiTestCase(test.TestCase): matchers.DictMatches(_get_fake_aggr_metadata())) def test_aggregate_create_delete_create_with_metadata(self): - #test for bug 1052479 + # test for bug 1052479 ctxt = context.get_admin_context() result = _create_aggregate(context=ctxt) expected_metadata = db.aggregate_metadata_get(ctxt, result['id']) @@ -1635,29 +1635,29 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin): instance = self.create_instance_with_args( metadata={'foo': 'bar'}) self.create_instance_with_args() - #For format 'tag-' + # For format 'tag-' result = db.instance_get_all_by_filters( self.ctxt, {'filter': [ {'name': 'tag-key', 'value': 'foo'}, {'name': 'tag-value', 'value': 'bar'}, ]}) self._assertEqualListsOfInstances([instance], result) - #For format 'tag:' + # For format 'tag:' result = db.instance_get_all_by_filters( self.ctxt, {'filter': [ {'name': 'tag:foo', 'value': 'bar'}, ]}) self._assertEqualListsOfInstances([instance], result) - #For non-existent tag + # For non-existent tag result = db.instance_get_all_by_filters( self.ctxt, {'filter': [ {'name': 'tag:foo', 'value': 'barred'}, ]}) self.assertEqual([], result) - #Confirm with deleted tags + # Confirm with deleted tags db.instance_metadata_delete(self.ctxt, instance['uuid'], 'foo') - #For format 'tag-' + # For format 'tag-' result = db.instance_get_all_by_filters( self.ctxt, {'filter': [ {'name': 'tag-key', 'value': 'foo'}, @@ -1668,7 +1668,7 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin): {'name': 'tag-value', 'value': 'bar'} ]}) 
self.assertEqual([], result) - #For format 'tag:' + # For format 'tag:' result = db.instance_get_all_by_filters( self.ctxt, {'filter': [ {'name': 'tag:foo', 'value': 'bar'}, @@ -2784,10 +2784,10 @@ class InstanceTypeTestCase(BaseInstanceTypeTestCase): real_it = db.flavor_get_all(self.ctxt, filters=filters) self._assertEqualListsOfObjects(expected_it, real_it) - #no filter + # no filter assert_multi_filter_flavor_get() - #test only with one filter + # test only with one filter for filt in mem_filts: assert_multi_filter_flavor_get(filt) for filt in root_filts: @@ -2797,7 +2797,7 @@ class InstanceTypeTestCase(BaseInstanceTypeTestCase): for filt in is_public_filts: assert_multi_filter_flavor_get(filt) - #test all filters together + # test all filters together for mem in mem_filts: for root in root_filts: for disabled in disabled_filts: diff --git a/nova/tests/db/test_migration_utils.py b/nova/tests/db/test_migration_utils.py index d3f93710e3..ecdb298db7 100644 --- a/nova/tests/db/test_migration_utils.py +++ b/nova/tests/db/test_migration_utils.py @@ -90,7 +90,7 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase): Column('c', String(256))) table.create() - #check missing shadow table + # check missing shadow table self.assertRaises(NoSuchTableError, utils.check_shadow_table, engine, table_name) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 8e8e3aa0be..d088c299ed 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -32,12 +32,12 @@ class StubGlanceClient(object): _images = images or [] map(lambda image: self.create(**image), _images) - #NOTE(bcwaldon): HACK to get client.images.* to work + # NOTE(bcwaldon): HACK to get client.images.* to work self.images = lambda: None for fn in ('list', 'get', 'data', 'create', 'update', 'delete'): setattr(self.images, fn, getattr(self, fn)) - #TODO(bcwaldon): implement filters + # TODO(bcwaldon): implement filters def list(self, filters=None, marker=None, limit=30, 
page_size=20): if marker is None: index = 0 diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py index 0e8d3ac114..e810ef4e99 100644 --- a/nova/tests/image/fake.py +++ b/nova/tests/image/fake.py @@ -154,7 +154,7 @@ class _FakeImageService(object): self._imagedata = {} super(_FakeImageService, self).__init__() - #TODO(bcwaldon): implement optional kwargs such as limit, sort_dir + # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir def detail(self, context, **kwargs): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 557ab25298..c36eec662d 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -264,7 +264,7 @@ class TestGlanceImageService(test.NoDBTestCase): self.flags(allowed_direct_url_schemes=['file'], group='glance') self.flags(group='image_file_url', filesystems=['gluster']) service = self._create_image_service(client) - #NOTE(Jbresnah) The following options must be added after the module + # NOTE(Jbresnah) The following options must be added after the module # has added the specific groups. self.flags(group='image_file_url:gluster', id=fs_id) self.flags(group='image_file_url:gluster', mountpoint=mountpoint) @@ -304,7 +304,7 @@ class TestGlanceImageService(test.NoDBTestCase): self.flags(allowed_direct_url_schemes=['file'], group='glance') self.flags(group='image_file_url', filesystems=['gluster']) service = self._create_image_service(client) - #NOTE(Jbresnah) The following options must be added after the module + # NOTE(Jbresnah) The following options must be added after the module # has added the specific groups. 
self.flags(group='image_file_url:gluster', id='someotherid') self.flags(group='image_file_url:gluster', mountpoint=mountpoint) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 06e8c0401f..110bad3844 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -282,7 +282,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): LOG.debug("Found_server=%s" % found_server) # TODO(justinsb): Mock doesn't yet do accurate state changes - #if found_server['status'] != 'deleting': + # if found_server['status'] != 'deleting': # break time.sleep(.1) diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index 5bd1696054..1ff73b9361 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -39,10 +39,10 @@ from nova import utils CONF = cfg.CONF -#NOTE: Neutron client raises Exception which is discouraged by HACKING. -# We set this variable here and use it for assertions below to avoid -# the hacking checks until we can make neutron client throw a custom -# exception class instead. +# NOTE: Neutron client raises Exception which is discouraged by HACKING. +# We set this variable here and use it for assertions below to avoid +# the hacking checks until we can make neutron client throw a custom +# exception class instead. 
NEUTRON_CLIENT_EXCEPTION = Exception @@ -2525,7 +2525,7 @@ class TestNeutronClientForAdminScenarios(test.TestCase): client.Client.__init__(**kwargs).WithSideEffects(client_mock) self.mox.ReplayAll() - #clean global + # clean global token_store = neutronv2.AdminTokenStore.get() token_store.admin_auth_token = None if admin_context: diff --git a/nova/tests/objects/test_compute_node.py b/nova/tests/objects/test_compute_node.py index 6b426e608e..a7b89bc22c 100644 --- a/nova/tests/objects/test_compute_node.py +++ b/nova/tests/objects/test_compute_node.py @@ -89,7 +89,7 @@ class _TestComputeNodeObject(object): compute = compute_node.ComputeNode() compute.service_id = 456 compute.stats = fake_stats - #NOTE (pmurray): host_ip is coerced to an IPAddress + # NOTE (pmurray): host_ip is coerced to an IPAddress compute.host_ip = fake_host_ip compute.create(self.context) self.compare_obj(compute, fake_compute_node, @@ -121,7 +121,7 @@ class _TestComputeNodeObject(object): compute.id = 123 compute.vcpus_used = 3 compute.stats = fake_stats - #NOTE (pmurray): host_ip is coerced to an IPAddress + # NOTE (pmurray): host_ip is coerced to an IPAddress compute.host_ip = fake_host_ip compute.save(self.context) self.compare_obj(compute, fake_compute_node, diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index d1ccade4c5..278e8fe854 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -302,7 +302,7 @@ class _BaseTestCase(test.TestCase): def json_comparator(self, expected, obj_val): # json-ify an object field for comparison with its db str - #equivalent + # equivalent self.assertEqual(expected, jsonutils.dumps(obj_val)) def str_comparator(self, expected, obj_val): diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index 634cf66927..eb4de0cb29 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -464,15 +464,15 @@ class 
HostFiltersTestCase(test.NoDBTestCase): service = {'disabled': False} host = fakes.FakeHostState('fake_host', 'fake_node', {'service': service}) - #True since empty + # True since empty self.assertTrue(filt_cls.host_passes(host, filter_properties)) fakes.FakeInstance(context=self.context, params={'host': 'fake_host', 'instance_type_id': 1}) - #True since same type + # True since same type self.assertTrue(filt_cls.host_passes(host, filter_properties)) - #False since different type + # False since different type self.assertFalse(filt_cls.host_passes(host, filter2_properties)) - #False since node not homogeneous + # False since node not homogeneous fakes.FakeInstance(context=self.context, params={'host': 'fake_host', 'instance_type_id': 2}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @@ -488,13 +488,13 @@ class HostFiltersTestCase(test.NoDBTestCase): service = {'disabled': False} host = fakes.FakeHostState('fake_host', 'fake_node', {'service': service}) - #True since no aggregates + # True since no aggregates self.assertTrue(filt_cls.host_passes(host, filter_properties)) - #True since type matches aggregate, metadata + # True since type matches aggregate, metadata self._create_aggregate_with_host(name='fake_aggregate', hosts=['fake_host'], metadata={'instance_type': 'fake1'}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) - #False since type matches aggregate, metadata + # False since type matches aggregate, metadata self.assertFalse(filt_cls.host_passes(host, filter2_properties)) def test_ram_filter_fails_on_memory(self): diff --git a/nova/tests/test_safeutils.py b/nova/tests/test_safeutils.py index e42ddea5c5..66d20ca79e 100644 --- a/nova/tests/test_safeutils.py +++ b/nova/tests/test_safeutils.py @@ -24,7 +24,7 @@ class GetCallArgsTestCase(test.NoDBTestCase): args = () kwargs = {'instance': {'uuid': 1}, 'red': 3, 'blue': 4} callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs) - #implicit self counts as an arg + # 
implicit self counts as an arg self.assertEqual(4, len(callargs)) self.assertIn('instance', callargs) self.assertEqual({'uuid': 1}, callargs['instance']) @@ -37,7 +37,7 @@ class GetCallArgsTestCase(test.NoDBTestCase): args = ({'uuid': 1}, 3, 4) kwargs = {} callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs) - #implicit self counts as an arg + # implicit self counts as an arg self.assertEqual(4, len(callargs)) self.assertIn('instance', callargs) self.assertEqual({'uuid': 1}, callargs['instance']) @@ -50,7 +50,7 @@ class GetCallArgsTestCase(test.NoDBTestCase): args = ({'uuid': 1}, 3) kwargs = {'blue': 4} callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs) - #implicit self counts as an arg + # implicit self counts as an arg self.assertEqual(4, len(callargs)) self.assertIn('instance', callargs) self.assertEqual({'uuid': 1}, callargs['instance']) @@ -63,7 +63,7 @@ class GetCallArgsTestCase(test.NoDBTestCase): args = () kwargs = {'instance': {'uuid': 1}, 'red': 3} callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs) - #implicit self counts as an arg + # implicit self counts as an arg self.assertEqual(4, len(callargs)) self.assertIn('instance', callargs) self.assertEqual({'uuid': 1}, callargs['instance']) @@ -76,7 +76,7 @@ class GetCallArgsTestCase(test.NoDBTestCase): args = ({'uuid': 1}, 3) kwargs = {} callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs) - #implicit self counts as an arg + # implicit self counts as an arg self.assertEqual(4, len(callargs)) self.assertIn('instance', callargs) self.assertEqual({'uuid': 1}, callargs['instance']) diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index 567630537a..77660fe9ed 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -381,7 +381,7 @@ class AuditPeriodTest(test.NoDBTestCase): def setUp(self): super(AuditPeriodTest, self).setUp() - #a fairly random time to test with + # a fairly random time to test with self.test_time = 
datetime.datetime(second=23, minute=12, hour=8, diff --git a/nova/tests/virt/libvirt/fake_imagebackend.py b/nova/tests/virt/libvirt/fake_imagebackend.py index 0946b1a6db..f2a0de969b 100644 --- a/nova/tests/virt/libvirt/fake_imagebackend.py +++ b/nova/tests/virt/libvirt/fake_imagebackend.py @@ -53,7 +53,7 @@ class Backend(object): return FakeImage(instance, name) def snapshot(self, path, image_type=''): - #NOTE(bfilippov): this is done in favor for + # NOTE(bfilippov): this is done in favor for # snapshot tests in test_libvirt.LibvirtConnTestCase return imagebackend.Backend(True).snapshot(path, image_type) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index c793de3ba2..f8a8f9b81a 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -4476,7 +4476,7 @@ class LibvirtConnTestCase(test.TestCase, self.compute._rollback_live_migration(self.context, instance_ref, 'dest', False) - #start test + # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}} @@ -4518,7 +4518,7 @@ class LibvirtConnTestCase(test.TestCase, self.compute._rollback_live_migration(self.context, instance_ref, 'dest', False) - #start test + # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}} @@ -4559,7 +4559,7 @@ class LibvirtConnTestCase(test.TestCase, self.compute._rollback_live_migration(self.context, instance_ref, 'dest', False) - #start test + # start test migrate_data = {} self.mox.ReplayAll() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @@ -4595,7 +4595,7 @@ class LibvirtConnTestCase(test.TestCase, self.compute._rollback_live_migration(self.context, instance_ref, 'dest', False) - #start test + # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}} @@ -4650,7 +4650,7 @@ class 
LibvirtConnTestCase(test.TestCase, self.compute._rollback_live_migration(self.context, instance_ref, 'dest', False) - #start test + # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}} diff --git a/nova/tests/virt/xenapi/stubs.py b/nova/tests/virt/xenapi/stubs.py index 3ad289d1c6..a2225af388 100644 --- a/nova/tests/virt/xenapi/stubs.py +++ b/nova/tests/virt/xenapi/stubs.py @@ -44,7 +44,7 @@ def stubout_instance_snapshot(stubs): stubs.Set(vm_utils, '_fetch_image', fake_fetch_image) def fake_wait_for_vhd_coalesce(*args): - #TODO(sirp): Should we actually fake out the data here + # TODO(sirp): Should we actually fake out the data here return "fakeparent", "fakebase" stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce) diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py index 2bb9dfceb9..ede8be7007 100644 --- a/nova/tests/virt/xenapi/test_xenapi.py +++ b/nova/tests/virt/xenapi/test_xenapi.py @@ -1165,7 +1165,7 @@ iface eth0 inet6 static def fake_resetnetwork(self, method, args): fake_resetnetwork.called = True - #NOTE(johngarbutt): as returned by FreeBSD and Gentoo + # NOTE(johngarbutt): as returned by FreeBSD and Gentoo return jsonutils.dumps({'returncode': '500', 'message': 'success'}) self.stubs.Set(stubs.FakeSessionForVMTests, @@ -2741,7 +2741,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase): 'from_port': 200, 'to_port': 299, 'cidr': '192.168.99.0/24'}) - #validate the extra rule + # validate the extra rule self.fw.refresh_security_group_rules(secgroup) regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299' ' -s 192.168.99.0/24') diff --git a/nova/utils.py b/nova/utils.py index 1ca10b8e1b..2f41914a3c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -825,7 +825,7 @@ def mkfs(fs, path, label=None, run_as_root=False): args = ['mkswap'] else: args = ['mkfs', '-t', fs] - #add -F to force no interactive 
execute on non-block device. + # add -F to force no interactive execute on non-block device. if fs in ('ext3', 'ext4', 'ntfs'): args.extend(['-F']) if label: diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py index 0cf0b637ab..351ca20f64 100644 --- a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py +++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py @@ -41,7 +41,6 @@ def upgrade(migrate_engine): Column('prov_vlan_id', Integer), Column('terminal_port', Integer), mysql_engine='InnoDB', - #mysql_charset='utf8' ) bm_interfaces = Table('bm_interfaces', meta, @@ -56,7 +55,6 @@ def upgrade(migrate_engine): Column('port_no', Integer), Column('vif_uuid', String(length=36), unique=True), mysql_engine='InnoDB', - #mysql_charset='utf8' ) bm_pxe_ips = Table('bm_pxe_ips', meta, @@ -69,7 +67,6 @@ def upgrade(migrate_engine): Column('bm_node_id', Integer), Column('server_address', String(length=255), unique=True), mysql_engine='InnoDB', - #mysql_charset='utf8' ) bm_deployments = Table('bm_deployments', meta, @@ -85,7 +82,6 @@ def upgrade(migrate_engine): Column('root_mb', Integer), Column('swap_mb', Integer), mysql_engine='InnoDB', - #mysql_charset='utf8' ) bm_nodes.create() diff --git a/nova/virt/driver.py b/nova/virt/driver.py index acfb90f26d..e6f7f7264c 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -1145,7 +1145,7 @@ class ComputeDriver(object): def add_to_aggregate(self, context, aggregate, host, **kwargs): """Add a compute host to an aggregate.""" - #NOTE(jogo) Currently only used for XenAPI-Pool + # NOTE(jogo) Currently only used for XenAPI-Pool raise NotImplementedError() def remove_from_aggregate(self, context, aggregate, host, **kwargs): diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py index 452edbf05a..15e65ed9c7 100644 --- a/nova/virt/firewall.py +++ b/nova/virt/firewall.py @@ -344,7 +344,7 @@ class 
IptablesFirewallDriver(FirewallDriver): # Set up rules to allow traffic to/from DHCP server self._do_dhcp_rules(ipv4_rules, network_info) - #Allow project network traffic + # Allow project network traffic if CONF.allow_same_net_traffic: self._do_project_network_rules(ipv4_rules, ipv6_rules, network_info) diff --git a/nova/virt/hyperv/livemigrationutils.py b/nova/virt/hyperv/livemigrationutils.py index 579965897e..4ff16fc2ab 100644 --- a/nova/virt/hyperv/livemigrationutils.py +++ b/nova/virt/hyperv/livemigrationutils.py @@ -181,7 +181,6 @@ class LiveMigrationUtils(object): for sasd in sasds: if (sasd.ResourceType == 31 and sasd.ResourceSubType == "Microsoft:Hyper-V:Virtual Hard Disk"): - #sasd.PoolId = "" new_resource_setting_data.append(sasd.GetText_(1)) return new_resource_setting_data diff --git a/nova/virt/hyperv/networkutils.py b/nova/virt/hyperv/networkutils.py index 27571485cd..07ad489187 100644 --- a/nova/virt/hyperv/networkutils.py +++ b/nova/virt/hyperv/networkutils.py @@ -48,7 +48,7 @@ class NetworkUtils(object): def create_vswitch_port(self, vswitch_path, port_name): switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] - #Create a port on the vswitch. + # Create a port on the vswitch. 
(new_port, ret_val) = switch_svc.CreateSwitchPort( Name=str(uuid.uuid4()), FriendlyName=port_name, diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py index 3b64010c6e..2fa9fe83ac 100644 --- a/nova/virt/hyperv/vif.py +++ b/nova/virt/hyperv/vif.py @@ -78,5 +78,5 @@ class HyperVNovaNetworkVIFDriver(HyperVBaseVIFDriver): self._vmutils.set_nic_connection(vm_name, vif['id'], vswitch_data) def unplug(self, instance, vif): - #TODO(alepilotti) Not implemented + # TODO(alepilotti) Not implemented pass diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py index 6f12429770..9f72b06b56 100644 --- a/nova/virt/hyperv/vmops.py +++ b/nova/virt/hyperv/vmops.py @@ -363,7 +363,7 @@ class VMOps(object): try: if self._vmutils.vm_exists(instance_name): - #Stop the VM first. + # Stop the VM first. self.power_off(instance) storage = self._vmutils.get_vm_storage_paths(instance_name) diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py index aa28df72f2..81d2ed98f6 100644 --- a/nova/virt/hyperv/vmutils.py +++ b/nova/virt/hyperv/vmutils.py @@ -111,7 +111,7 @@ class VMUtils(object): wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS, wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS) settings_paths = [v.path_() for v in vmsettings] - #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx + # See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx (ret_val, summary_info) = vs_man_svc.GetSummaryInformation( [constants.VM_SUMMARY_NUM_PROCS, constants.VM_SUMMARY_ENABLED_STATE, @@ -309,10 +309,10 @@ class VMUtils(object): drive = self._get_new_resource_setting_data(res_sub_type) - #Set the IDE ctrller as parent. + # Set the IDE ctrller as parent. drive.Parent = ctrller_path drive.Address = drive_addr - #Add the cloned disk drive object to the vm. + # Add the cloned disk drive object to the vm. 
new_resources = self._add_virt_resource(drive, vm.path_()) drive_path = new_resources[0] @@ -322,11 +322,11 @@ class VMUtils(object): res_sub_type = self._IDE_DVD_RES_SUB_TYPE res = self._get_new_resource_setting_data(res_sub_type) - #Set the new drive as the parent. + # Set the new drive as the parent. res.Parent = drive_path res.Connection = [path] - #Add the new vhd object as a virtual hard disk to the vm. + # Add the new vhd object as a virtual hard disk to the vm. self._add_virt_resource(res, vm.path_()) def create_scsi_controller(self, vm_name): @@ -366,17 +366,17 @@ class VMUtils(object): def create_nic(self, vm_name, nic_name, mac_address): """Create a (synthetic) nic and attach it to the vm.""" - #Create a new nic + # Create a new nic new_nic_data = self._get_new_setting_data( self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS) - #Configure the nic + # Configure the nic new_nic_data.ElementName = nic_name new_nic_data.Address = mac_address.replace(':', '') new_nic_data.StaticMacAddress = 'True' new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] - #Add the new nic to the vm + # Add the new nic to the vm vm = self._lookup_vm_check(vm_name) self._add_virt_resource(new_nic_data, vm.path_()) @@ -386,8 +386,8 @@ class VMUtils(object): vm = self._lookup_vm_check(vm_name) (job_path, ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state]) - #Invalid state for current operation (32775) typically means that - #the VM is already in the state requested + # Invalid state for current operation (32775) typically means that + # the VM is already in the state requested self.check_ret_val(ret_val, job_path, [0, 32775]) LOG.debug("Successfully changed vm state of %(vm_name)s " "to %(req_state)s", @@ -430,7 +430,7 @@ class VMUtils(object): vm = self._lookup_vm_check(vm_name) vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] - #Remove the VM. Does not destroy disks. + # Remove the VM. Does not destroy disks. 
(job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) self.check_ret_val(ret_val, job_path) diff --git a/nova/virt/hyperv/vmutilsv2.py b/nova/virt/hyperv/vmutilsv2.py index ed2c0788cc..61a88291cf 100644 --- a/nova/virt/hyperv/vmutilsv2.py +++ b/nova/virt/hyperv/vmutilsv2.py @@ -104,11 +104,11 @@ class VMUtilsV2(vmutils.VMUtils): drive = self._get_new_resource_setting_data(res_sub_type) - #Set the IDE ctrller as parent. + # Set the IDE ctrller as parent. drive.Parent = ctrller_path drive.Address = drive_addr drive.AddressOnParent = drive_addr - #Add the cloned disk drive object to the vm. + # Add the cloned disk drive object to the vm. new_resources = self._add_virt_resource(drive, vm.path_()) drive_path = new_resources[0] @@ -157,7 +157,7 @@ class VMUtilsV2(vmutils.VMUtils): vm = self._lookup_vm_check(vm_name) vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] - #Remove the VM. It does not destroy any associated virtual disk. + # Remove the VM. It does not destroy any associated virtual disk. (job_path, ret_val) = vs_man_svc.DestroySystem(vm.path_()) self.check_ret_val(ret_val, job_path) diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py index faaea2d2a0..c72af82fee 100644 --- a/nova/virt/hyperv/volumeops.py +++ b/nova/virt/hyperv/volumeops.py @@ -123,18 +123,18 @@ class VolumeOps(object): target_lun = data['target_lun'] target_iqn = data['target_iqn'] - #Getting the mounted disk + # Getting the mounted disk mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn, target_lun) if ebs_root: - #Find the IDE controller for the vm. + # Find the IDE controller for the vm. 
ctrller_path = self._vmutils.get_vm_ide_controller( instance_name, 0) - #Attaching to the first slot + # Attaching to the first slot slot = 0 else: - #Find the SCSI controller for the vm + # Find the SCSI controller for the vm ctrller_path = self._vmutils.get_vm_scsi_controller( instance_name) slot = self._get_free_controller_slot(ctrller_path) @@ -179,7 +179,7 @@ class VolumeOps(object): target_lun = data['target_lun'] target_iqn = data['target_iqn'] - #Getting the mounted disk + # Getting the mounted disk mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn, target_lun) @@ -224,7 +224,7 @@ class VolumeOps(object): LOG.debug('Device number: %(device_number)s, ' 'target lun: %(target_lun)s', {'device_number': device_number, 'target_lun': target_lun}) - #Finding Mounted disk drive + # Finding Mounted disk drive for i in range(0, CONF.hyperv.volume_attach_retry_count): mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number( device_number) @@ -238,10 +238,10 @@ class VolumeOps(object): return mounted_disk_path def disconnect_volume(self, physical_drive_path): - #Get the session_id of the ISCSI connection + # Get the session_id of the ISCSI connection session_id = self._volutils.get_session_id_from_mounted_disk( physical_drive_path) - #Logging out the target + # Logging out the target self._volutils.execute_log_out(session_id) def get_target_from_disk_path(self, physical_drive_path): diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py index ccd890daef..05be31af90 100644 --- a/nova/virt/hyperv/volumeutils.py +++ b/nova/virt/hyperv/volumeutils.py @@ -64,7 +64,7 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils): self.execute('iscsicli.exe', 'RefreshTargetPortal', target_address, target_port) else: - #Adding target portal to iscsi initiator. Sending targets + # Adding target portal to iscsi initiator. 
Sending targets self.execute('iscsicli.exe', 'AddTargetPortal', target_address, target_port, '*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*', @@ -74,7 +74,7 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils): """Ensure that the target is logged in.""" self._login_target_portal(target_portal) - #Listing targets + # Listing targets self.execute('iscsicli.exe', 'ListTargets') retry_count = CONF.hyperv.volume_attach_retry_count diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index cdf56839be..a4da68d1e9 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1086,7 +1086,7 @@ class LibvirtDriver(driver.ComputeDriver): if destroy_disks: self._cleanup_lvm(instance) - #NOTE(haomai): destroy volumes if needed + # NOTE(haomai): destroy volumes if needed if CONF.libvirt.images_type == 'rbd': self._cleanup_rbd(instance) @@ -1932,7 +1932,7 @@ class LibvirtDriver(driver.ComputeDriver): except exception.InstanceNotFound: raise exception.InstanceNotRunning(instance_id=instance.uuid) - ##### Find dev name + # Find dev name my_dev = None xml = virt_dom.XMLDesc(0) @@ -3617,7 +3617,7 @@ class LibvirtDriver(driver.ComputeDriver): container_dir=container_dir, use_cow=CONF.use_cow_images) try: - #Note(GuanQiang): save container root device name here, used for + # Note(GuanQiang): save container root device name here, used for # detaching the linked image device when deleting # the lxc instance. 
if container_root_device: @@ -4097,7 +4097,7 @@ class LibvirtDriver(driver.ComputeDriver): "vendor_id": cfgdev.pci_capability.vendor_id[2:6], } - #requirement by DataBase Model + # requirement by DataBase Model device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device device.update(_get_device_type(cfgdev)) return device @@ -4207,9 +4207,9 @@ class LibvirtDriver(driver.ComputeDriver): return domain.interfaceStats(iface_id) def get_console_pool_info(self, console_type): - #TODO(mdragon): console proxy should be implemented for libvirt, - # in case someone wants to use it with kvm or - # such. For now return fake data. + # TODO(mdragon): console proxy should be implemented for libvirt, + # in case someone wants to use it with kvm or + # such. For now return fake data. return {'address': '127.0.0.1', 'username': 'fakeuser', 'password': 'fakepassword'} @@ -5038,7 +5038,7 @@ class LibvirtDriver(driver.ComputeDriver): def get_host_uptime(self, host): """Returns the result of calling "uptime".""" - #NOTE(dprince): host seems to be ignored for this call and in + # NOTE(dprince): host seems to be ignored for this call and in # other compute drivers as well. Perhaps we should remove it? out, err = utils.execute('env', 'LANG=C', 'uptime') return out @@ -5537,7 +5537,7 @@ class HostState(object): disk_info_dict = self.driver._get_local_gb_info() data = {} - #NOTE(dprince): calling capabilities before getVersion works around + # NOTE(dprince): calling capabilities before getVersion works around # an initialization issue with some versions of Libvirt (1.0.5.5). 
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116 # See: https://bugs.launchpad.net/nova/+bug/1215593 diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index 4caf9e0001..a79934dfa4 100644 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -336,7 +336,7 @@ class Raw(Image): generating = 'image_id' not in kwargs if generating: if not self.check_image_exists(): - #Generating image in place + # Generating image in place prepare_template(target=self.path, *args, **kwargs) else: if not os.path.exists(base): @@ -463,7 +463,7 @@ class Lvm(Image): generated = 'ephemeral_size' in kwargs - #Generate images with specified size right on volume + # Generate images with specified size right on volume if generated and size: lvm.create_volume(self.vg, self.lv, size, sparse=self.sparse) diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py index af0cd40f97..d772c8eb57 100644 --- a/nova/virt/libvirt/utils.py +++ b/nova/virt/libvirt/utils.py @@ -531,7 +531,7 @@ def is_mounted(mount_path, source=None): except processutils.ProcessExecutionError as exc: return False except OSError as exc: - #info since it's not required to have this tool. + # info since it's not required to have this tool. 
if exc.errno == errno.ENOENT: LOG.info(_LI("findmnt tool is not installed")) return False diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py index d6aef9d1de..af4dd4922f 100644 --- a/nova/virt/libvirt/volume.py +++ b/nova/virt/libvirt/volume.py @@ -258,9 +258,9 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver): iscsi_properties = connection_info['data'] if self.use_multipath: - #multipath installed, discovering other targets if available - #multipath should be configured on the nova-compute node, - #in order to fit storage vendor + # multipath installed, discovering other targets if available + # multipath should be configured on the nova-compute node, + # in order to fit storage vendor out = self._run_iscsiadm_bare(['-m', 'discovery', '-t', @@ -312,7 +312,7 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver): 'tries': tries}) if self.use_multipath: - #we use the multipath device instead of the single path device + # we use the multipath device instead of the single path device self._rescan_multipath() multipath_device = self._get_multipath_device_name(host_device) @@ -465,8 +465,8 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver): "node.session.auth.password", iscsi_properties['auth_password']) - #duplicate logins crash iscsiadm after load, - #so we scan active sessions to see if the node is logged in. + # duplicate logins crash iscsiadm after load, + # so we scan active sessions to see if the node is logged in. 
out = self._run_iscsiadm_bare(["-m", "session"], run_as_root=True, check_exit_code=[0, 1, 21])[0] or "" @@ -487,8 +487,8 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver): ("--login",), check_exit_code=[0, 255]) except processutils.ProcessExecutionError as err: - #as this might be one of many paths, - #only set successful logins to startup automatically + # as this might be one of many paths, + # only set successful logins to startup automatically if err.exit_code in [15]: self._iscsiadm_update(iscsi_properties, "node.startup", @@ -730,7 +730,7 @@ class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver): # NOTE(jbr_): If aoedevpath does not exist, do a discover. self._aoe_discover() - #NOTE(jbr_): Device path is not always present immediately + # NOTE(jbr_): Device path is not always present immediately def _wait_for_device_discovery(aoedevpath, mount_device): tries = self.tries if os.path.exists(aoedevpath): diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index d5b7e4cc72..b7ff03bfee 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -136,7 +136,7 @@ class VMwareESXDriver(driver.ComputeDriver): self._host = host.Host(self._session) self._host_state = None - #TODO(hartsocks): back-off into a configuration test module. + # TODO(hartsocks): back-off into a configuration test module. 
if CONF.vmware.use_linked_clone is None: raise error_util.UseLinkedCloneConfigurationFault() diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index a9dacd134e..d0e7864866 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -1048,7 +1048,7 @@ def propset_dict(propset): if propset is None: return {} - #TODO(hartsocks): once support for Python 2.6 is dropped + # TODO(hartsocks): once support for Python 2.6 is dropped # change to {[(prop.name, prop.val) for prop in propset]} return dict([(prop.name, prop.val) for prop in propset]) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 890cccc35e..0bf730711a 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -202,7 +202,7 @@ class VMwareVMOps(object): datastore_regex=self._datastore_regex) dc_info = self.get_datacenter_ref_and_name(datastore.ref) - #TODO(hartsocks): this pattern is confusing, reimplement as methods + # TODO(hartsocks): this pattern is confusing, reimplement as methods # The use of nested functions in this file makes for a confusing and # hard to maintain file. At some future date, refactor this method to # be a full-fledged method. This will also make unit testing easier. 
@@ -1320,7 +1320,7 @@ class VMwareVMOps(object): uptime=uptime) diags.memory_details.maximum = data.get('memorySizeMB', 0) diags.memory_details.used = data.get('guestMemoryUsage', 0) - #TODO(garyk): add in cpu, nic and disk stats + # TODO(garyk): add in cpu, nic and disk stats return diags def _get_vnc_console_connection(self, instance): diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py index 00d9e27389..17480dc1f4 100644 --- a/nova/virt/xenapi/agent.py +++ b/nova/virt/xenapi/agent.py @@ -338,7 +338,7 @@ class XenAPIBasedAgent(object): def resetnetwork(self): LOG.debug('Resetting network', instance=self.instance) - #NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success + # NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success return self._call_agent('resetnetwork', timeout=CONF.xenserver.agent_resetnetwork_timeout, success_codes=['0', '500']) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index df243f2558..03b8b269eb 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -12,8 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. # -#============================================================================ -# + + # Parts of this file are based upon xmlrpclib.py, the XML-RPC client # interface included in the Python distribution. 
# @@ -630,7 +630,7 @@ class SessionBase(object): return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref) def host_compute_free_memory(self, _1, ref): - #Always return 12GB available + # Always return 12GB available return 12 * units.Gi def _plugin_agent_version(self, method, args): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index f200937668..d22d1e07ce 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -1124,7 +1124,7 @@ def generate_single_ephemeral(session, instance, vm_ref, userdevice, instance_name_label = instance["name"] name_label = "%s ephemeral" % instance_name_label - #TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here + # TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here label_number = int(userdevice) - 4 if label_number > 0: name_label = "%s (%d)" % (name_label, label_number) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b71e48946d..4d276799ed 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -266,7 +266,7 @@ class VMOps(object): def create_disks_step(undo_mgr, disk_image_type, image_meta, name_label): - #TODO(johngarbutt) clean up if this is not run + # TODO(johngarbutt) clean up if this is not run vdis = vm_utils.import_all_migrated_disks(self._session, instance) @@ -994,7 +994,7 @@ class VMOps(object): instance=instance) try: self._restore_orig_vm_and_cleanup_orphan(instance) - #TODO(johngarbutt) should also cleanup VHDs at destination + # TODO(johngarbutt) should also cleanup VHDs at destination except Exception as rollback_error: LOG.warn(_("_migrate_disk_resizing_up failed to " "rollback: %s"), rollback_error, diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 90d2edab62..26607eaed9 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -37,7 +37,7 @@ class VolumeOps(object): def attach_volume(self, connection_info, instance_name, mountpoint, 
hotplug=True): """Attach volume to VM instance.""" - #TODO(johngarbutt) move this into _attach_volume_to_vm + # TODO(johngarbutt) move this into _attach_volume_to_vm dev_number = volume_utils.get_device_number(mountpoint) vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py index cbeea5884f..2fbef0e6c9 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -31,7 +31,7 @@ translations = gettext.translation('nova', fallback=True) _ = translations.ugettext -##### Logging setup +# Logging setup def configure_logging(name): log = logging.getLogger() @@ -43,7 +43,7 @@ def configure_logging(name): log.addHandler(sysh) -##### Exceptions +# Exceptions class PluginError(Exception): """Base Exception class for all plugin errors.""" @@ -59,7 +59,7 @@ class ArgumentError(PluginError): PluginError.__init__(self, *args) -##### Argument validation +# Argument validation def exists(args, key): """Validates that a freeform string argument to a RPC method call is given. diff --git a/tox.ini b/tox.ini index 2bcec9b530..609f52e79d 100644 --- a/tox.ini +++ b/tox.ini @@ -55,10 +55,10 @@ sitepackages = False # H803 skipped on purpose per list discussion. # E125 is deliberately excluded. See https://github.com/jcrocholl/pep8/issues/126 # The rest of the ignores are TODOs -# New from hacking 0.9: E129, E131, E265, H407, H405, H904 +# New from hacking 0.9: E129, E131, H407, H405, H904 # E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301 -ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,E265,H405,H803,H904 +ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,H803,H904 exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools [hacking]