This article is based on the OpenStack Stein release.
In the previous article, the volume-create request was handed from the API service to the scheduler via an RPC cast.
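As a quick recap, the API-side flow ends by casting the request onto the scheduler topic. A simplified sketch of that hand-off (based on cinder\scheduler\rpcapi.py; argument handling and RPC version negotiation are omitted, so treat it as an approximation rather than the exact upstream code):

def create_volume(self, ctxt, volume, snapshot_id=None, image_id=None,
                  request_spec=None, filter_properties=None, backup_id=None):
    # cast() is fire-and-forget: cinder-api returns immediately, and
    # cinder-scheduler picks the message up from its queue, landing in
    # SchedulerManager.create_volume below.
    cctxt = self._get_cctxt()
    cctxt.cast(ctxt, 'create_volume', volume=volume, snapshot_id=snapshot_id,
               image_id=image_id, request_spec=request_spec,
               filter_properties=filter_properties, backup_id=backup_id)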
(1) cinder\scheduler\manager.py
The SchedulerManager.create_volume method handles the create request and calls cinder.scheduler.flows.create_volume.get_flow to build the scheduling flow (a sketch of get_flow follows the excerpt below).
cinder.scheduler.manager.SchedulerManager.create_volume:
try:
    flow_engine = create_volume.get_flow(context,
                                         self.driver,
                                         request_spec,
                                         filter_properties,
                                         volume,
                                         snapshot_id,
                                         image_id,
                                         backup_id)
except Exception:
    msg = _("Failed to create scheduler manager volume flow")
    LOG.exception(msg)
    raise exception.CinderException(msg)
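get_flow itself is not shown above. Roughly, it chains the two scheduler tasks into a taskflow linear flow and loads them into an engine, along the lines of this simplified sketch (details such as the flow name constant and docstrings are omitted):

def get_flow(context, driver_api, request_spec=None, filter_properties=None,
             volume=None, snapshot_id=None, image_id=None, backup_id=None):
    create_what = {'context': context,
                   'raw_request_spec': request_spec,
                   'filter_properties': filter_properties,
                   'volume': volume,
                   'snapshot_id': snapshot_id,
                   'image_id': image_id,
                   'backup_id': backup_id}
    scheduler_flow = linear_flow.Flow('volume_create_scheduler')
    # Normalize the raw request spec into the 'request_spec' used later ...
    scheduler_flow.add(ExtractSchedulerSpecTask(
        rebind={'request_spec': 'raw_request_spec'}))
    # ... then let the scheduler driver pick a backend and cast to cinder-volume.
    scheduler_flow.add(ScheduleCreateVolumeTask(driver_api))
    # Load (but do not run) the flow with the initial data.
    return taskflow.engines.load(scheduler_flow, store=create_what)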
(2) cinder\scheduler\flows\create_volume.py
get_flow first adds the ExtractSchedulerSpecTask, which extracts and normalizes the request spec, and then the ScheduleCreateVolumeTask, which performs the actual scheduling. ScheduleCreateVolumeTask.execute hands the request to the scheduler driver; how that driver is chosen is sketched after the excerpt below.
cinder.scheduler.flows.create_volume.ScheduleCreateVolumeTask.execute:
try:
    if CONF.enable_multi_ceph:
        get_remote_image_service = glance.get_remote_image_service
        image_href = request_spec.get('image_id')
        if image_href:
            image_service, image_id = get_remote_image_service(context,
                                                               image_href)
            image_location = image_service.get_location(context,
                                                        image_id)
            filter_properties['image_location'] = image_location
    self.driver_api.schedule_create_volume(context, request_spec,
                                           filter_properties)
except Exception as e:
    self.message_api.create(
        context,
        message_field.Action.SCHEDULE_ALLOCATE_VOLUME,
        resource_uuid=request_spec['volume_id'],
        exception=e)
    # An error happened, notify on the scheduler queue and log that
    # this happened and set the volume to errored out and reraise the
    # error *if* exception caught isn't NoValidBackend. Otherwise *do
    # not* reraise (since what's the point?)
    with excutils.save_and_reraise_exception(
            reraise=not isinstance(e, exception.NoValidBackend)):
        try:
            self._handle_failure(context, request_spec, e)
        finally:
            common.error_out(volume, reason=e)
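self.driver_api in this task is the scheduler driver object that SchedulerManager loaded at start-up; which class that is comes from the scheduler_driver option, and the default is the FilterScheduler used in step (3). A simplified sketch of the relevant part of SchedulerManager.__init__ (not the full constructor):

def __init__(self, scheduler_driver=None, service_name=None,
             *args, **kwargs):
    if not scheduler_driver:
        # Default: 'cinder.scheduler.filter_scheduler.FilterScheduler'
        scheduler_driver = CONF.scheduler_driver
    # The class named by the option handles schedule_create_volume below.
    self.driver = importutils.import_object(scheduler_driver)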
(3) cinder\scheduler\filter_scheduler.py
The FilterScheduler.schedule_create_volume method selects a backend for the create request; if no suitable backend is found it raises NoValidBackend ("No weighed backends available"). It calls _get_weighted_candidates to run the configured scheduler filters over the available cinder-volume services, then get_weighed_backends to rank the survivors, and finally casts to the volume RPC API to create the volume on the chosen backend (that cast is sketched after the code below).
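Both get_filtered_backends and get_weighed_backends delegate to the HostManager, which instantiates the filter and weigher classes configured by the scheduler_default_filters and scheduler_default_weighers options (by default AvailabilityZoneFilter, CapacityFilter and CapabilitiesFilter, weighed by CapacityWeigher). A rough sketch of those two helpers in cinder\scheduler\host_manager.py (simplified, signatures abbreviated):

def get_filtered_backends(self, backends, filter_properties,
                          filter_class_names=None):
    # Keep only the backend states that pass every configured filter.
    filter_classes = self._choose_backend_filters(filter_class_names)
    return self.filter_handler.get_filtered_objects(filter_classes,
                                                    backends,
                                                    filter_properties)

def get_weighed_backends(self, backends, weight_properties,
                         weigher_class_names=None):
    # Score the surviving backends; the result is ordered best-first.
    weigher_classes = self._choose_backend_weighers(weigher_class_names)
    return self.weight_handler.get_weighed_objects(weigher_classes,
                                                   backends, weight_properties)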
cinder.scheduler.filter_scheduler.FilterScheduler.schedule_create_volume:
def _get_weighted_candidates(self, context, request_spec,
                             filter_properties=None):
    """Return a list of backends that meet required specs.

    Returned list is ordered by their fitness.
    """
    elevated = context.elevated()

    # Since Cinder is using mixed filters from Oslo and its own, which
    # takes 'resource_XX' and 'volume_XX' as input respectively, copying
    # 'volume_XX' to 'resource_XX' will make both filters happy.
    volume_type = request_spec.get("volume_type")
    resource_type = volume_type if volume_type is not None else {}

    config_options = self._get_configuration_options()

    if filter_properties is None:
        filter_properties = {}
    self._populate_retry(filter_properties,
                         request_spec)

    request_spec_dict = jsonutils.to_primitive(request_spec)

    filter_properties.update({'context': context,
                              'request_spec': request_spec_dict,
                              'config_options': config_options,
                              'volume_type': volume_type,
                              'resource_type': resource_type})

    self.populate_filter_properties(request_spec,
                                    filter_properties)

    # If multiattach is enabled on a volume, we need to add
    # multiattach to extra specs, so that the capability
    # filtering is enabled.
    multiattach = request_spec['volume_properties'].get('multiattach',
                                                        False)
    if multiattach and 'multiattach' not in resource_type.get(
            'extra_specs', {}):
        if 'extra_specs' not in resource_type:
            resource_type['extra_specs'] = {}
        resource_type['extra_specs'].update(
            multiattach='<is> True')

    # Revert volume consumed capacity if it's a rescheduled request
    retry = filter_properties.get('retry', {})
    if retry.get('backends', []):
        self.host_manager.revert_volume_consumed_capacity(
            retry['backends'][-1],
            request_spec['volume_properties']['size'])

    # Find our local list of acceptable backends by filtering and
    # weighing our options. we virtually consume resources on
    # it so subsequent selections can adjust accordingly.
    # Note: remember, we are using an iterator here. So only
    # traverse this list once.
    backends = self.host_manager.get_all_backend_states(elevated)

    # Filter local hosts based on requirements ...
    backends = self.host_manager.get_filtered_backends(backends,
                                                       filter_properties)
    if not backends:
        return []

    LOG.debug("Filtered %s", backends)
    # weighted_backends = WeightedHost() ... the best
    # backend for the job.
    weighed_backends = self.host_manager.get_weighed_backends(
        backends, filter_properties)
    return weighed_backends

def _schedule(self, context, request_spec, filter_properties=None):
    weighed_backends = self._get_weighted_candidates(context, request_spec,
                                                     filter_properties)
    # When we get the weighed_backends, we clear those backends that don't
    # match the resource's backend (it could be assigned from group,
    # snapshot or volume).
    resource_backend = request_spec.get('resource_backend')
    if weighed_backends and resource_backend:
        resource_backend_has_pool = bool(utils.extract_host(
            resource_backend, 'pool'))
        # Get host name including host@backend#pool info from
        # weighed_backends.
        for backend in weighed_backends[::-1]:
            backend_id = (
                backend.obj.backend_id if resource_backend_has_pool
                else utils.extract_host(backend.obj.backend_id)
            )
            if backend_id != resource_backend:
                weighed_backends.remove(backend)
    if not weighed_backends:
        LOG.warning('No weighed backend found for volume '
                    'with properties: %s',
                    filter_properties['request_spec'].get('volume_type'))
        return None
    return self._choose_top_backend(weighed_backends, request_spec)

def schedule_create_volume(self, context, request_spec, filter_properties):
    backend = self._schedule(context, request_spec, filter_properties)

    if not backend:
        raise exception.NoValidBackend(reason=_("No weighed backends "
                                                "available"))

    backend = backend.obj
    volume_id = request_spec['volume_id']

    updated_volume = driver.volume_update_db(
        context, volume_id,
        backend.host,
        backend.cluster_name,
        availability_zone=backend.service['availability_zone'])
    self._post_select_populate_filter_properties(filter_properties,
                                                 backend)

    # context is not serializable
    filter_properties.pop('context', None)

    self.volume_rpcapi.create_volume(context, updated_volume, request_spec,
                                     filter_properties,
                                     allow_reschedule=True)
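volume_rpcapi.create_volume is again an asynchronous cast, this time onto the message queue of the backend the scheduler picked, so the actual creation continues in cinder-volume. Roughly (a simplified sketch based on cinder\volume\rpcapi.py; RPC version checks omitted):

def create_volume(self, ctxt, volume, request_spec, filter_properties,
                  allow_reschedule=True):
    # Target the queue of the chosen backend; VolumeManager.create_volume
    # on that host will consume this message.
    cctxt = self._get_cctxt(volume.service_topic_queue)
    cctxt.cast(ctxt, 'create_volume',
               request_spec=request_spec,
               filter_properties=filter_properties,
               allow_reschedule=allow_reschedule,
               volume=volume)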
At this point, the scheduler's side of the work is done; the request has been cast on to the cinder-volume service.