detach_tasks
During load balancing, tasks are detached from the busiest CPU's runqueue and then attached to the local CPU's runqueue. The return value is the number of detached tasks.
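In load_balance(), the pattern is: take the busiest rq's lock, call detach_tasks() to collect up to env->imbalance worth of load onto env->tasks, drop the lock, and then call attach_tasks() to enqueue the collected tasks on the local (destination) rq. Below is a minimal userspace sketch of that detach-then-attach idea; the toy_rq/toy_task types, the singly linked list and the example loads are illustrative assumptions, not the kernel's data structures.

#include <stdio.h>

struct toy_task {
	int load;                 /* stand-in for task_h_load(p) */
	struct toy_task *next;
};

struct toy_rq {
	struct toy_task *head;    /* stand-in for rq->cfs_tasks */
};

/* Pop tasks off the busiest queue until 'imbalance' load has been collected;
 * returns how many tasks were detached (the real detach_tasks() additionally
 * checks can_migrate_task(), loop_max, etc.). */
static int toy_detach_tasks(struct toy_rq *src, struct toy_task **detached, int imbalance)
{
	int n = 0;

	while (src->head && imbalance > 0) {
		struct toy_task *p = src->head;

		src->head = p->next;      /* detach from the source queue */
		p->next = *detached;      /* collect on the private list  */
		*detached = p;
		imbalance -= p->load;
		n++;
	}
	return n;
}

/* Splice every collected task onto the destination queue, like attach_tasks(). */
static void toy_attach_tasks(struct toy_rq *dst, struct toy_task *detached)
{
	while (detached) {
		struct toy_task *p = detached;

		detached = p->next;
		p->next = dst->head;
		dst->head = p;
	}
}

int main(void)
{
	struct toy_task t[3] = { { 300, 0 }, { 200, 0 }, { 100, 0 } };
	struct toy_rq busiest = { 0 }, local = { 0 };
	struct toy_task *detached = 0;
	int i, moved;

	for (i = 0; i < 3; i++) {         /* enqueue three tasks on the busiest rq */
		t[i].next = busiest.head;
		busiest.head = &t[i];
	}

	moved = toy_detach_tasks(&busiest, &detached, 250);   /* imbalance = 250 */
	toy_attach_tasks(&local, detached);
	printf("moved %d task(s), busiest now %s\n",
	       moved, busiest.head ? "non-empty" : "empty");
	return 0;
}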
/*
* detach_tasks() -- tries to detach up to imbalance weighted load from
 * busiest_rq, as part of a balancing operation within domain "sd".
*
* Returns number of detached tasks if successful and 0 otherwise.
*/
// The return value is the number of detached tasks
static int detach_tasks(struct lb_env *env, struct rq_flags *rf)
{
// src_rq is the busiest rq (the one tasks are pulled from)
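// cfs_tasks is the per-rq list of every runnable CFS task, linked through se.group_node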
struct list_head *tasks = &env->src_rq->cfs_tasks;
struct task_struct *p;
unsigned long load;
int detached = 0;
lockdep_assert_held(&env->src_rq->lock);
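/* env->imbalance is the amount of weighted load load_balance() still wants to move;
   nothing to do if it has already been satisfied */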
if (env->imbalance <= 0)
return 0;
/* (7.6.1) Iterate over the tasks on the busiest rq */
while (!list_empty(tasks)) {
/*
* We don't want to steal all, otherwise we may be treated likewise,
* which could at worst lead to a livelock crash.
*/
/* (7.6.2) When the destination cpu is doing idle balancing, don't steal the
   last task and leave the busiest cpu empty (see the livelock comment above) */
if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
break;
p = list_first_entry(tasks, struct task_struct, se.group_node);
env->loop++;
/* We've more or less seen every task there is, call it quits */
/* (7.6.3) Scan at most loop_max tasks (bounded by sysctl_sched_nr_migrate, default 32) */
if (env->loop > env->loop_max)
break;
/* take a breather every nr_migrate tasks */
/* (7.6.4) Take a breather after every sched_nr_migrate_break tasks;
   if loop_max has not been reached yet, the scan will be resumed later */
if (env->loop > env->loop_break) {
env->loop_break += sched_nr_migrate_break;
env->flags |= LBF_NEED_BREAK;
break;
}
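/* LBF_NEED_BREAK tells load_balance() to drop the busiest rq's lock and call
   detach_tasks() again, bounding how long the lock is held in one go */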
/* (7.6.5) Can this task be migrated at all? */
if (!can_migrate_task(p, env))
goto next;
/* (7.6.6) Get p's load as seen from the top-level cfs_rq,
   and use it to decide whether the task is worth migrating */
load = task_h_load(p);
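/* With CONFIG_FAIR_GROUP_SCHED, task_h_load() scales p's load through its
   task-group hierarchy so tasks from different groups are comparable */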
// LB_MIN is false (disabled) by default
if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
goto next;
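/* Skip a task whose load is more than twice the remaining imbalance;
   moving it would overshoot the target and invite ping-ponging */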
if ((load / 2) > env->imbalance)
goto next;
// Queue the detached task on env's list
detach_task(p, env, rf);
list_add(&p->se.group_node, &env->tasks);
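/* load_balance() later moves everything on env->tasks to the destination rq via attach_tasks() */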
detached++;
env->imbalance -= load;
#ifdef CONFIG_PREEMPT