rev439ベースにライブマイグレーションの機能をマージ
(Merge the live-migration feature on top of rev439.)

このバージョンはEBSなし、CPUフラグのチェックなし
(This version has no EBS support and no CPU-flag compatibility check.)
This commit is contained in:
masumotok
2010-12-07 19:25:43 +09:00
parent 7eddff3f89
commit 2925ca3ac3
13 changed files with 745 additions and 10 deletions
+120 -2
View File
@@ -76,6 +76,13 @@ from nova import quota
from nova import utils
from nova.auth import manager
from nova.cloudpipe import pipelib
#added by masumotok
from nova import rpc
# added by masumotok
from nova.api.ec2 import cloud
# added by masumotok
from nova.compute import power_state
FLAGS = flags.FLAGS
@@ -424,6 +431,116 @@ class NetworkCommands(object):
int(network_size), int(vlan_start),
int(vpn_start))
# this class is added by masumotok
class InstanceCommands(object):
    """Class for managing VM instances."""

    def live_migration(self, ec2_id, dest):
        """Request a live migration of instance *ec2_id* to host *dest*.

        Validates the network manager flavor, the destination host and
        the instance state, then casts the request to the scheduler.
        """
        # To suppress: No handlers could be found for logger "amqplib".
        logging.basicConfig()
        ctxt = context.get_admin_context()

        if 'nova.network.manager.VlanManager' != FLAGS.network_manager:
            msg = 'Only nova.network.manager.VlanManager is supported now. Sorry!'
            raise Exception(msg)

        # 1. check whether the destination host exists.
        host_ref = db.host_get_by_name(ctxt, dest)

        # 2. check whether the instance exists and is running.
        # internal_id is predefined so the message below cannot raise a
        # NameError when ec2_id_to_internal_id() itself fails (the
        # original referenced a possibly-unbound variable there).
        internal_id = None
        try:
            internal_id = cloud.ec2_id_to_internal_id(ec2_id)
            instance_ref = db.instance_get_by_internal_id(ctxt, internal_id)
        except exception.NotFound:
            print('Not found instance_id(%s (internal_id:%s))'
                  % (ec2_id, internal_id))
            raise

        if power_state.RUNNING != instance_ref['state'] or \
           'running' != instance_ref['state_description']:
            print('Instance(%s) is not running' % ec2_id)
            sys.exit(1)

        # 3. the instance must not already be running on dest.
        if dest == instance_ref['host']:
            print('%s is where %s is running now. choose different host.'
                  % (dest, ec2_id))
            sys.exit(2)

        # 4. hand the live migration over to the scheduler.
        rpc.cast(ctxt,
                 FLAGS.scheduler_topic,
                 {"method": "live_migration",
                  "args": {"ec2_id": ec2_id,
                           "dest": dest}})

        print('Finished all procedure. check instance are migrated successfully')
        # Typo fix: "chech" -> "check".
        print('check status by using euca-describe-instances.')
# this class is created by masumotok
class HostCommands(object):
    """Class for managing hosts (physical nodes)."""

    def list(self):
        """Print the name of every registered host."""
        # To suppress: No handlers could be found for logger "amqplib".
        logging.basicConfig()
        host_refs = db.host_get_all(context.get_admin_context())
        for host_ref in host_refs:
            print(host_ref['name'])

    def show(self, host):
        """Print physical and per-project cpu/memory/hdd usage for *host*."""
        # To suppress: No handlers could be found for logger "amqplib".
        logging.basicConfig()
        result = rpc.call(context.get_admin_context(),
                          FLAGS.scheduler_topic,
                          {"method": "show_host_resource",
                           "args": {"host": host}})
        # Checking the result message format is necessary; that will be
        # done properly when this feature is included in the API.
        if dict != type(result):
            print('Unexpected error occurs')
        elif not result['ret']:
            print('%s' % result['msg'])
        else:
            cpu = result['phy_resource']['cpu']
            mem = result['phy_resource']['memory_mb']
            hdd = result['phy_resource']['hdd_gb']
            print('HOST\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)')
            print('%s\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd))
            for p_id, val in result['usage'].items():
                print('%s\t%s\t\t%s\t%s\t%s' % (host,
                                                p_id,
                                                val['cpu'],
                                                val['memory_mb'],
                                                val['hdd_gb']))

    def has_keys(self, dic, keys):
        """Return (all_found, missing) for *keys* looked up in *dic*.

        Bug fix: the original tested membership on the builtin ``dict``
        type (``dict.has_key(key)``) instead of the ``dic`` argument,
        which always raised/behaved incorrectly.
        """
        not_found = [key for key in keys if key not in dic]
        return (0 == len(not_found), not_found)
# modified by masumotok
#CATEGORIES = [
# ('user', UserCommands),
# ('project', ProjectCommands),
# ('role', RoleCommands),
# ('shell', ShellCommands),
# ('vpn', VpnCommands),
# ('floating', FloatingIpCommands),
# ('network', NetworkCommands)]
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@@ -431,8 +548,9 @@ CATEGORIES = [
('shell', ShellCommands),
('vpn', VpnCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands)]
('network', NetworkCommands),
('instance', InstanceCommands),
('host',HostCommands)]
def lazy_match(name, key_value_tuples):
"""Finds all objects that have a key that case insensitively contains
+11 -2
View File
@@ -678,13 +678,22 @@ class CloudController(object):
ec2_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
internal_id = floating_ip_ref['fixed_ip']['instance']['ec2_id']
# modified by masumotok
internal_id = \
floating_ip_ref['fixed_ip']['instance']['internal_id']
ec2_id = internal_id_to_ec2_id(internal_id)
address_rv = {'public_ip': address,
'instance_id': ec2_id}
if context.user.is_admin():
# modified by masumotok- b/c proj_id is never inserted
#details = "%s (%s)" % (address_rv['instance_id'],
# floating_ip_ref['project_id'])
if None != address_rv['instance_id']:
status = 'reserved'
else:
status = None
details = "%s (%s)" % (address_rv['instance_id'],
floating_ip_ref['project_id'])
status)
address_rv['instance_id'] = details
addresses.append(address_rv)
return {'addressesSet': addresses}
+3
View File
@@ -84,6 +84,7 @@ class ComputeAPI(base.Base):
if not type(security_group) is list:
security_group = [security_group]
print '<<<<<<<<<<<<<<<<<<<<<<<<<<1'
security_groups = []
self.ensure_default_security_group(context)
for security_group_name in security_group:
@@ -92,6 +93,7 @@ class ComputeAPI(base.Base):
security_group_name)
security_groups.append(group['id'])
print '<<<<<<<<<<<<<<<<<<<<<<<<<<2'
if key_data is None and key_name:
key_pair = db.key_pair_get(context, context.user_id, key_name)
key_data = key_pair['public_key']
@@ -115,6 +117,7 @@ class ComputeAPI(base.Base):
'key_name': key_name,
'key_data': key_data}
print '<<<<<<<<<<<<<<<<<<<<<<<<<<3'
elevated = context.elevated()
instances = []
logging.debug("Going to run %s instances...", num_instances)
+138
View File
@@ -36,6 +36,13 @@ terminating it.
import datetime
import logging
# added by masumotok
import sys
# added by masumotok
import traceback
# added by masumotok
import os
from twisted.internet import defer
@@ -44,12 +51,19 @@ from nova import flags
from nova import manager
from nova import utils
from nova.compute import power_state
# added by masumotok
from nova import rpc
# added by masumotok
from nova import db
FLAGS = flags.FLAGS
flags.DEFINE_string('instances_path', '$state_path/instances',
'where instances are stored on disk')
flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
'Driver to use for controlling virtualization')
# created by masumotok
flags.DEFINE_string('live_migration_timeout', 30,
'Timeout value for pre_live_migration is completed.')
class ComputeManager(manager.Manager):
@@ -251,3 +265,127 @@ class ComputeManager(manager.Manager):
yield self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
defer.returnValue(True)
# created by masumotok
def get_cpu_number(self):
    """Return the number of CPU cores of this physical host.

    Counts 'processor' entries in /proc/cpuinfo (Linux only).
    """
    # 'with' closes the file handle (the original leaked it).
    with open('/proc/cpuinfo') as cpuinfo:
        return cpuinfo.read().count('processor')
# created by masumotok
def get_mem_size(self):
    """Return the total physical memory of this host in MB.

    Parses the 'MemTotal:' entry of /proc/meminfo (Linux only).
    """
    # 'with' closes the file handle (the original leaked it).
    with open('/proc/meminfo') as f:
        meminfo = f.read().split()
    idx = meminfo.index('MemTotal:')
    # /proc/meminfo reports kB; floor-divide to MB (same result as the
    # original Python 2 integer division).
    return int(meminfo[idx + 1]) // 1024
# created by masumotok
def get_hdd_size(self):
    """Return the size in GB of the filesystem holding instances_path."""
    stats = os.statvfs(FLAGS.instances_path)
    total_bytes = stats.f_bsize * stats.f_blocks
    # Bytes -> GB; floor division matches the original Python 2 result.
    return total_bytes // 1024 // 1024 // 1024
# created by masumotok
def pre_live_migration(self, context, instance_id, dest):
    """Prepare this (destination) host to receive a live migration.

    Looks up the instance's volumes, fixed ip, network and security
    groups, then recreates the nwfilters, filtering rules and the vlan
    bridge locally so the migrated domain can run here.

    :param context: request context (admin).
    :param instance_id: id of the instance about to be migrated in.
    :param dest: destination host name (this host).
    """
    # 1. get volume info (shelf/slot numbers) for attached volumes.
    instance_ref = db.instance_get(context, instance_id)
    ec2_id = instance_ref['hostname']
    volumes = []
    try:
        volumes = db.volume_get_by_ec2_id(context, ec2_id)
    except exception.NotFound:
        # An instance without volumes is fine; keep the empty list.
        # NOTE(review): 'exception' must be imported at module level
        # for this clause to work -- confirm it is.
        logging.debug('%s has no volume.', ec2_id)
    shelf_slots = {}
    for vol in volumes:
        shelf, slot = db.volume_get_shelf_and_blade(context, vol['id'])
        # NOTE(review): mixes vol['id'] and vol.id access styles --
        # presumably equivalent on this model; verify.
        shelf_slots[vol.id] = (shelf, slot)
    # 2. get the instance's fixed ip; abort preparation if it has none.
    fixed_ip = db.instance_get_fixed_address(context, instance_id)
    if None == fixed_ip:
        logging.error('Not found fixedip for %s\n%s',
                      ec2_id,
                      ''.join(traceback.format_tb(sys.exc_info()[2])))
        return
    # 3. get the network the fixed ip belongs to.
    network_ref = db.fixed_ip_get_network(context, fixed_ip)
    # 4. security groups (filtering rules) of the instance.
    secgrp_refs = db.security_group_get_by_instance(context, instance_id)
    # 5. if any volume is mounted, prepare here (not implemented yet).
    if 0 != len(shelf_slots):
        pass
    # 6. create nova-instance-instance-xxx in the hypervisor via libvirt
    # (this rule can be seen by executing virsh nwfilter-list).
    self.driver.setup_nwfilters_for_instance(instance_ref)
    # 7. insert the security-group filtering rules.
    for secgrp_ref in secgrp_refs:
        self.driver.refresh_security_group(secgrp_ref.id)
    # 8. ensure the vlan bridge exists on this host.
    self.network_manager.driver.ensure_vlan_bridge(network_ref['vlan'],
                                                   network_ref['bridge'])
# created by masumotok
def nwfilter_for_instance_exists(self, context, instance_id):
    """Return whether the nova-instance-instance-xxx nwfilter exists."""
    ref = db.instance_get(context, instance_id)
    return self.driver.nwfilter_for_instance_exists(ref)
# created by masumotok
def live_migration(self, context, instance_id, dest):
    """Execute a live migration of *instance_id* to host *dest*.

    1. Ask the destination host to prepare (pre_live_migration).
    2. Wait until the nwfilter shows up on the destination.
    3. Kick off the actual migration through the virt driver.
    """
    import time

    # 1. ask the destination host to prepare for live migration.
    compute_topic = db.queue_get_for(context, FLAGS.compute_topic, dest)
    ret = rpc.call(context,
                   compute_topic,
                   {"method": "pre_live_migration",
                    "args": {'instance_id': instance_id,
                             'dest': dest}})
    # isinstance() is the idiomatic type test (was: type(ret) == ...).
    if isinstance(ret, rpc.RemoteError):
        logging.error('Live migration failed(err at %s)', dest)
        db.instance_set_state(context,
                              instance_id,
                              power_state.RUNNING,
                              'running')
        return

    # Wait for the nwfilter (nova-instance-instance-xxx) to be set up on
    # the destination; otherwise the live migration would fail.
    ret = False
    for _ in range(FLAGS.live_migration_timeout):
        ret = rpc.call(context,
                       compute_topic,
                       {"method": "nwfilter_for_instance_exists",
                        "args": {'instance_id': instance_id}})
        if ret:
            break
        time.sleep(1)
    if not ret:
        logging.error('Timeout for pre_live_migration at %s', dest)
        return

    # 2. execute the live migration.  The driver may raise
    # ProcessExecutionError, but nothing is recovered in this version.
    instance_ref = db.instance_get(context, instance_id)
    ret = self.driver.live_migration(instance_ref, dest)
    if not ret:
        logging.debug('Fail to live migration')
        return
+69
View File
@@ -195,6 +195,11 @@ def floating_ip_get_by_address(context, address):
return IMPL.floating_ip_get_by_address(context, address)
# this method is created by masumotok
def floating_ip_update(context, address, values):
    """Apply the *values* dict to the floating ip row for *address*."""
    return IMPL.floating_ip_update(context, address, values)
####################
@@ -334,6 +339,36 @@ def instance_add_security_group(context, instance_id, security_group_id):
security_group_id)
# created by masumotok
def instance_get_all_by_host(context, hostname):
    """Return every instance record located on *hostname*."""
    return IMPL.instance_get_all_by_host(context, hostname)


def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
    """Return the vcpus total used on *hostname* by project *proj_id*."""
    return IMPL.instance_get_vcpu_sum_by_host_and_project(
        context, hostname, proj_id)


def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
    """Return the memory (MB) total used on *hostname* by *proj_id*."""
    return IMPL.instance_get_memory_sum_by_host_and_project(
        context, hostname, proj_id)


def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
    """Return the disk (GB) total used on *hostname* by *proj_id*."""
    return IMPL.instance_get_disk_sum_by_host_and_project(
        context, hostname, proj_id)
###################
@@ -833,3 +868,37 @@ def host_get_networks(context, host):
"""
return IMPL.host_get_networks(context, host)
# below all methods related to host table are created by masumotok
###################
def host_create(context, value):
    """Create a host row from the *value* dict."""
    return IMPL.host_create(context, value)


def host_get(context, host_id):
    """Return the host with *host_id*; raise NotFound otherwise."""
    return IMPL.host_get(context, host_id)


def host_get_all(context, session=None):
    """Return all non-deleted hosts; raise NotFound when none exist.

    NOTE(review): *session* is accepted but not forwarded to IMPL --
    confirm whether it should be.
    """
    return IMPL.host_get_all(context)


def host_get_by_name(context, host):
    """Return the host named *host*; raise NotFound otherwise."""
    return IMPL.host_get_by_name(context, host)


def host_update(context, host, values):
    """Set the given *values* on a host and persist it."""
    return IMPL.host_update(context, host, values)


def host_deactivated(context, host):
    """Soft-delete a host by setting its deleted flag."""
    return IMPL.host_deactivated(context, host)
+131
View File
@@ -394,6 +394,17 @@ def floating_ip_get_by_address(context, address, session=None):
return result
# created by masumotok
@require_context
def floating_ip_update(context, address, values):
    """Apply *values* to the floating ip row identified by *address*."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context, address, session)
        for key, value in values.items():
            floating_ip_ref[key] = value
        floating_ip_ref.save(session=session)
###################
@@ -746,6 +757,52 @@ def instance_add_security_group(context, instance_id, security_group_id):
instance_ref.save(session=session)
# created by masumotok
def instance_get_all_by_host(context, hostname):
    """Return all instances on *hostname* visible in this context.

    Fixes two defects in the original: get_session() was called twice
    (the second call was dead code guarded by an always-false test),
    and the result was compared against None although Query.all()
    always returns a list (possibly empty).
    """
    session = get_session()
    return session.query(models.Instance
               ).filter_by(host=hostname
               ).filter_by(deleted=can_read_deleted(context)
               ).all()
# created by masumotok
def _instance_get_sum_by_host_and_project(context, column, hostname, proj_id):
    """Return an aggregate of *column* for instances on a host/project.

    NOTE(review): Query.value(column) returns the column value of the
    first matching row, not a SUM across rows, despite this helper's
    name -- confirm whether func.sum() was intended here.
    """
    session = get_session()
    result = session.query(models.Instance
                 ).filter_by(host=hostname
                 ).filter_by(project_id=proj_id
                 ).filter_by(deleted=can_read_deleted(context)
                 ).value(column)
    # Treat "no matching rows" as zero usage.
    if None == result:
        return 0
    return result
# created by masumotok
def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
    """Aggregate 'vcpus' over instances on *hostname* for *proj_id*."""
    return _instance_get_sum_by_host_and_project(
        context, 'vcpus', hostname, proj_id)


def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
    """Aggregate 'memory_mb' over instances on *hostname* for *proj_id*."""
    return _instance_get_sum_by_host_and_project(
        context, 'memory_mb', hostname, proj_id)


def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
    """Aggregate 'local_gb' over instances on *hostname* for *proj_id*."""
    return _instance_get_sum_by_host_and_project(
        context, 'local_gb', hostname, proj_id)
###################
@@ -1746,3 +1803,77 @@ def host_get_networks(context, host):
filter_by(deleted=False).\
filter_by(host=host).\
all()
#below all methods related to host table are created by masumotok
###################
@require_admin_context
def host_create(context, values):
    """Insert a new host row built from the *values* dict."""
    host_ref = models.Host()
    for key, value in values.items():
        host_ref[key] = value
    host_ref.save()
    return host_ref


@require_admin_context
def host_get(context, host_id, session=None):
    """Fetch a non-deleted host by id; raise NotFound when missing."""
    session = session or get_session()
    result = session.query(models.Host
                 ).filter_by(deleted=False
                 ).filter_by(id=host_id
                 ).first()
    if not result:
        raise exception.NotFound('No host for id %s' % host_id)
    return result


@require_admin_context
def host_get_all(context, session=None):
    """Fetch every non-deleted host; raise NotFound when none exist."""
    session = session or get_session()
    result = session.query(models.Host
                 ).filter_by(deleted=False
                 ).all()
    if not result:
        raise exception.NotFound('No host record found .')
    return result


@require_admin_context
def host_get_by_name(context, host, session=None):
    """Fetch a non-deleted host by name; raise NotFound when missing."""
    session = session or get_session()
    result = session.query(models.Host
                 ).filter_by(deleted=False
                 ).filter_by(name=host
                 ).first()
    if not result:
        raise exception.NotFound('No host for name %s' % host)
    return result


@require_admin_context
def host_update(context, host_id, values):
    """Apply *values* to the host row *host_id* inside a transaction."""
    session = get_session()
    with session.begin():
        host_ref = host_get(context, host_id, session=session)
        for key, value in values.items():
            host_ref[key] = value
        host_ref.save(session=session)


@require_admin_context
def host_deactivated(context, host):
    """Soft-delete the given host by flagging it deleted."""
    host_update(context, host, {'deleted': True})
+23 -1
View File
@@ -138,6 +138,24 @@ class NovaBase(object):
# __tablename__ = 'hosts'
# id = Column(String(255), primary_key=True)
# this class is created by masumotok
class Host(BASE, NovaBase):
    """Represents a physical host on which nova services run."""
    __tablename__ = 'hosts'
    id = Column(Integer, primary_key=True)
    # Host name as reported by the running service.
    name = Column(String(255))
    # Physical resources of the node; -1 means "not yet reported"
    # (filled in by the compute service via host_update()).
    cpu = Column(Integer, nullable=False, default=-1)
    memory_mb = Column(Integer, nullable=False, default=-1)
    hdd_gb = Column(Integer, nullable=False, default=-1)
    #cpuid = Column(Integer, nullable=False)
    deleted = Column(Boolean, default=False)
    # Lifecycle notes:
    # Create: row is created when calling service_create().
    # Delete: rows are never deleted; instead the "deleted" column is
    #         set True when the host goes down, because Host.id is a
    #         foreign key of service and records of the "service" table
    #         are not deleted.
    # Read:   "deleted" is True when hosts_up() runs and the host is down.
class Service(BASE, NovaBase):
"""Represents a running service on a host."""
@@ -526,10 +544,14 @@ def register_models():
it will never need to be called explicitly elsewhere.
"""
from sqlalchemy import create_engine
#models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp,
# FloatingIp, Network, SecurityGroup,
# SecurityGroupIngressRule, SecurityGroupInstanceAssociation,
# AuthToken, User, Project) # , Image, Host
models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp,
FloatingIp, Network, SecurityGroup,
SecurityGroupIngressRule, SecurityGroupInstanceAssociation,
AuthToken, User, Project) # , Image, Host
AuthToken, User, Project, Host) # , Image
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
+7
View File
@@ -112,10 +112,14 @@ class NetworkManager(manager.Manager):
# the designated network host.
ctxt = context.get_admin_context()
for network in self.db.host_get_networks(ctxt, self.host):
print '<<<<<< nova.network.manager.init_host <<<<'
print '<<<<<< nova.network.manager.init_host (%s)<<<<' % network['id']
self._on_set_network_host(ctxt, network['id'])
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
print '<<<<<< nova.network.manager.set_network_host <<<<'
print '<<<<<< nova.network.manager.set_network_host (%s)<<<<' % network_id
logging.debug("setting network host")
host = self.db.network_set_host(context,
network_id,
@@ -452,6 +456,9 @@ class VlanManager(NetworkManager):
self.driver.ensure_vlan_forward(network_ref['vpn_public_address'],
network_ref['vpn_public_port'],
network_ref['vpn_private_address'])
print '--------------------'
print 'UUUUUUPdate dhcp!'
print '--------------------'
self.driver.update_dhcp(context, network_ref['id'])
def setup_compute_network(self, context, instance_id):
+107
View File
@@ -29,6 +29,10 @@ from nova import flags
from nova import manager
from nova import rpc
from nova import utils
# 3 modules are added by masumotok
from nova import exception
from nova.api.ec2 import cloud
from nova.compute import power_state
FLAGS = flags.FLAGS
flags.DEFINE_string('scheduler_driver',
@@ -66,3 +70,106 @@ class SchedulerManager(manager.Manager):
{"method": method,
"args": kwargs})
logging.debug("Casting to %s %s for %s", topic, host, method)
# created by masumotok
def live_migration(self, context, ec2_id, dest):
    """Schedule a live migration of *ec2_id* to host *dest*.

    Returns True when the request was casted to the source compute
    host, False when *dest* lacks the resources for the instance.
    """
    # 1. resolve the ec2 id to the database instance record.
    internal_id = cloud.ec2_id_to_internal_id(ec2_id)
    instance_ref = db.instance_get_by_internal_id(context, internal_id)
    instance_id = instance_ref['id']

    # 2. refuse when the destination cannot hold the instance.
    if not self.has_enough_resource(context, instance_id, dest):
        return False

    # 3. mark the instance as migrating before handing off.
    db.instance_set_state(context,
                          instance_id,
                          power_state.PAUSED,
                          'migrating')

    # 4. ask the compute service currently hosting the instance to migrate.
    src_host = instance_ref['host']
    rpc.cast(context,
             db.queue_get_for(context, FLAGS.compute_topic, src_host),
             {"method": 'live_migration',
              "args": {'instance_id': instance_id,
                       'dest': dest}})
    return True
# this method is created by masumotok
def has_enough_resource(self, context, instance_id, dest):
    """Return True when host *dest* can accommodate the instance.

    Subtracts the vcpus/memory/disk of every instance already placed
    on *dest* from the host's physical totals and compares the
    remainder with the candidate instance's requirements.
    """
    # Requirements of the instance being migrated.
    instance_ref = db.instance_get(context, instance_id)
    ec2_id = instance_ref['hostname']
    vcpus = instance_ref['vcpus']
    mem = instance_ref['memory_mb']
    hdd = instance_ref['local_gb']

    # Remaining capacity on the destination host.
    host_ref = db.host_get_by_name(context, dest)
    total_cpu = int(host_ref['cpu'])
    total_mem = int(host_ref['memory_mb'])
    total_hdd = int(host_ref['hdd_gb'])

    instances_ref = db.instance_get_all_by_host(context, dest)
    for i_ref in instances_ref:
        total_cpu -= int(i_ref['vcpus'])
        total_mem -= int(i_ref['memory_mb'])
        total_hdd -= int(i_ref['local_gb'])

    logging.debug('host(%s) remains vcpu:%s mem:%s hdd:%s,' %
                  (dest, total_cpu, total_mem, total_hdd))
    # Bug fix: the original logged the host totals here instead of the
    # instance's own requirements.
    logging.debug('instance(%s) has vcpu:%s mem:%s hdd:%s,' %
                  (ec2_id, vcpus, mem, hdd))

    if total_cpu <= vcpus or total_mem <= mem or total_hdd <= hdd:
        logging.debug('%s doesnt have enough resource for %s' %
                      (dest, ec2_id))
        return False
    logging.debug('%s has enough resource for %s' % (dest, ec2_id))
    return True
# this method is created by masumotok
def show_host_resource(self, context, host, *args):
    """Return the physical and per-project usage resources of *host*.

    Returns {'ret': False, 'msg': ...} when the host is unknown,
    otherwise {'ret': True, 'phy_resource': {...},
    'usage': {project_id: {...}}}.
    """
    try:
        host_ref = db.host_get_by_name(context, host)
    except exception.NotFound:
        # Unknown host: report instead of raising so the caller gets a
        # readable message.  (A trailing "except: raise" clause in the
        # original was redundant and has been dropped.)
        return {'ret': False, 'msg': 'No such Host'}

    # Physical resources recorded for this host.
    h_resource = {'cpu': host_ref['cpu'],
                  'memory_mb': host_ref['memory_mb'],
                  'hdd_gb': host_ref['hdd_gb']}

    # Per-project usage on this host.
    instances_ref = db.instance_get_all_by_host(context, host_ref['name'])
    if 0 == len(instances_ref):
        return {'ret': True, 'phy_resource': h_resource, 'usage': {}}

    u_resource = {}
    project_ids = list(set([i['project_id'] for i in instances_ref]))
    for p_id in project_ids:
        cpu = db.instance_get_vcpu_sum_by_host_and_project(context,
                                                           host,
                                                           p_id)
        mem = db.instance_get_memory_sum_by_host_and_project(context,
                                                             host,
                                                             p_id)
        hdd = db.instance_get_disk_sum_by_host_and_project(context,
                                                           host,
                                                           p_id)
        u_resource[p_id] = {'cpu': cpu, 'memory_mb': mem, 'hdd_gb': hdd}
    return {'ret': True, 'phy_resource': h_resource, 'usage': u_resource}
+22
View File
@@ -72,6 +72,14 @@ class Service(object, service.Service):
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
# this try-except operations are added by masumotok
try:
host_ref = db.host_get_by_name(ctxt, self.host)
except exception.NotFound:
host_ref = db.host_create(ctxt, {'name': self.host})
host_ref = self._update_host_ref(ctxt, host_ref)
try:
service_ref = db.service_get_by_args(ctxt,
self.host,
@@ -109,6 +117,20 @@ class Service(object, service.Service):
'report_count': 0})
self.service_id = service_ref['id']
# created by masumotok
def _update_host_ref(self, context, host_ref):
    """Record compute-node resources (cpu/mem/hdd) on the host row.

    Only compute managers know the physical resources; other services
    leave the host record untouched.  Returns *host_ref* unchanged.
    """
    # "0 <= s.find(...)" was an obscure spelling of substring membership.
    if 'ComputeManager' in self.manager_class_name:
        cpu = self.manager.get_cpu_number()
        memory_mb = self.manager.get_mem_size()
        hdd_gb = self.manager.get_hdd_size()
        db.host_update(context,
                       host_ref['id'],
                       {'cpu': cpu,
                        'memory_mb': memory_mb,
                        'hdd_gb': hdd_gb})
    return host_ref
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
+10 -3
View File
@@ -133,9 +133,16 @@ def runthis(prompt, cmd, check_exit_code=True):
def generate_uid(topic, size=8):
    """Generate a unique id for *topic*.

    Instances (topic "i") get a random 28-bit integer internal id so it
    can be mapped to an ec2 id; every other topic keeps the historical
    "<topic>-<size random chars>" string form.

    Note: the merge left the pre-merge implementation lines in place as
    unreachable dead code before the new logic; they are removed here.
    """
    if topic == "i":
        return random.randint(0, 2 ** 28 - 1)
    characters = '01234567890abcdefghijklmnopqrstuvwxyz'
    choices = [random.choice(characters) for x in range(size)]
    return '%s-%s' % (topic, ''.join(choices))
def generate_mac():
+101
View File
@@ -44,6 +44,8 @@ Supports KVM, QEMU, UML, and XEN.
import logging
import os
import shutil
# appended by masumotok
#import libvirt
import IPy
from twisted.internet import defer
@@ -101,6 +103,10 @@ flags.DEFINE_string('libvirt_uri',
'',
'Override the default libvirt URI (which is dependent'
' on libvirt_type)')
# added by masumotok
flags.DEFINE_string('live_migration_uri',
"qemu+tcp://%s/system",
'Define protocol used by live_migration feature')
flags.DEFINE_bool('allow_project_net_traffic',
True,
'Whether to allow in project network traffic')
@@ -648,6 +654,101 @@ class LibvirtConnection(object):
fw = NWFilterFirewall(self._conn)
fw.ensure_security_group_filter(security_group_id)
# created by masumotok
def setup_nwfilters_for_instance(self, instance):
    """Install the per-instance nwfilters via NWFilterFirewall."""
    firewall = NWFilterFirewall(self._conn)
    return firewall.setup_nwfilters_for_instance(instance)
# created by masumotok
def nwfilter_for_instance_exists(self, instance_ref):
    """Return True if the nova-instance-<name> nwfilter is defined.

    NOTE(review): "import libvirt" at the top of this file is commented
    out; if it stays that way, the except clause below raises NameError
    instead of returning False -- confirm the import is restored.
    """
    try:
        # NOTE: 'filter' shadows the builtin of the same name.
        filter = 'nova-instance-%s' % instance_ref.name
        self._conn.nwfilterLookupByName(filter)
        return True
    except libvirt.libvirtError:
        return False
# created by masumotok
def live_migration(self, instance_ref, dest):
    """Migrate a running domain to *dest* and return a Deferred.

    Shells out to "virsh migrate --live"; the returned Deferred fires
    once the domain has disappeared from the local hypervisor, at
    which point _post_live_migration() updates the database.
    """
    uri = FLAGS.live_migration_uri % dest
    # Output is unused; failures surface as a ProcessExecutionError
    # from utils.execute.
    out, err = utils.execute("sudo virsh migrate --live %s %s"
                             % (instance_ref.name, uri))
    # wait for completion of live_migration
    d = defer.Deferred()
    d.addCallback(lambda _: self._post_live_migration(instance_ref, dest))
    timer = task.LoopingCall(f=None)
    def _wait_for_live_migration():
        # Poll until get_info raises NotFound, which means the domain
        # is gone locally, i.e. the migration completed.
        try:
            state = self.get_info(instance_ref.name)['state']
        #except libvirt.libvirtError, e:
        except exception.NotFound:
            timer.stop()
            d.callback(None)
    timer.f = _wait_for_live_migration
    timer.start(interval=0.5, now=True)
    return d
# created by masumotok
def _post_live_migration(self, instance_ref, dest):
    """Update database records after a successful live migration.

    Moves the fixed ip, network, and (when present) floating ip
    records of the instance over to *dest*, then marks the instance
    as running on *dest*.
    """
    # 1. detaching volumes
    # (not necessary in current version )
    #try :
    #    ec2_id = instance_ref['ec2_id']
    #    volumes = db.volume_get_by_ec2_id(context, ec2_id)
    #    for volume in volumes :
    #        self.detach_volume(context, instance_id, volume.id)
    #except exception.NotFound:
    #    logging.debug('%s doesnt mount any volumes.. ' % ec2_id)
    # 2. releasing vlan
    # (not necessary in current implementation?)
    # 3. releasing security group ingress rule
    # (not necessary in current implementation?)
    # 4. database updating
    ec2_id = instance_ref['hostname']
    ctxt = context.get_admin_context()
    instance_id = instance_ref['id']
    fixed_ip = db.instance_get_fixed_address(ctxt, instance_id)
    # not return if fixed_ip is not found, otherwise,
    # instance never be accessible..
    # NOTE(review): when fixed_ip is None the code still calls
    # db.fixed_ip_update(ctxt, None, ...) below -- confirm that is
    # intended rather than skipping the update.
    if None == fixed_ip:
        logging.error('fixed_ip is not found for %s ' % ec2_id)
    db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
    network_ref = db.fixed_ip_get_network(ctxt, fixed_ip)
    db.network_update(ctxt, network_ref['id'], {'host': dest})
    try:
        floating_ip = db.instance_get_floating_address(ctxt, instance_id)
        # not return if floating_ip is not found, otherwise,
        # instance never be accessible..
        if None == floating_ip:
            logging.error('floating_ip is not found for %s ' % ec2_id)
        floating_ip_ref = db.floating_ip_get_by_address(ctxt, floating_ip)
        db.floating_ip_update(ctxt,
                              floating_ip_ref['address'],
                              {'host': dest})
    except exception.NotFound:
        logging.debug('%s doesnt have floating_ip.. ' % ec2_id)
    except:
        # Deliberate best-effort: a floating-ip failure must not abort
        # the state update below, or the instance stays "migrating".
        msg = 'Live migration: Unexpected error:'
        msg += '%s cannot inherit floating ip.. ' % ec2_id
        logging.error(msg)
    # Finally mark the instance as running on the destination host.
    db.instance_update(ctxt,
                       instance_id,
                       {'state_description': 'running',
                        'state': power_state.RUNNING,
                        'host': dest})
    logging.info('Live migrating %s to %s finishes successfully'
                 % (ec2_id, dest))
class NWFilterFirewall(object):
"""
+3 -2
View File
@@ -25,6 +25,7 @@ from sphinx.setup_command import BuildDoc
from nova.utils import parse_mailmap, str_dict_replace
class local_BuildDoc(BuildDoc):
def run(self):
for builder in ['html', 'man']:
@@ -54,8 +55,8 @@ setup(name='nova',
author='OpenStack',
author_email='nova@lists.launchpad.net',
url='http://www.openstack.org/',
cmdclass={ 'sdist': local_sdist,
'build_sphinx' : local_BuildDoc },
cmdclass={'sdist': local_sdist,
'build_sphinx': local_BuildDoc},
packages=find_packages(exclude=['bin', 'smoketests']),
include_package_data=True,
scripts=['bin/nova-api',