get_sd_load_idx
这里面涉及到一个 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 数组。scheduler 会根据不同的 load balance 类型(busy、newly idle、idle)选择不同的 load 进行计算,主要是在 update_sg_lb_stats 时进行计算。
/**
 * get_sd_load_idx - Obtain the load index for a given sched domain.
 * @sd: The sched_domain whose load_idx is to be obtained.
 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
 *
 * Selects which of the domain's per-balance-type indices (busy, newly
 * idle, or idle) should be used to pick a cpu_load[] slot.
 *
 * Return: The load index.
 */
static inline int get_sd_load_idx(struct sched_domain *sd,
				  enum cpu_idle_type idle)
{
	if (idle == CPU_NOT_IDLE)
		return sd->busy_idx;

	if (idle == CPU_NEWLY_IDLE)
		return sd->newidle_idx;

	/* CPU_IDLE (and any other value) falls back to the idle index. */
	return sd->idle_idx;
}
set_sd_overutilized & clear_sd_overutilized
static void set_sd_overutilized(struct sched_domain *sd)
{
trace_sched_overutilized(sd, sd->shared->overutilized, true);
sd->shared->overutilized = true;
}
static void clear_sd_overutilized(struct sched_domain *sd)
{
trace_sched_overutilized(sd, sd->shared->overutilized, false);
sd->shared->overutilized = false;
}
update_sd_lb_stats
update_blocked_averages实际上可以视为更新cpu上entity的load和cpu的load值,详见
https://blog.youkuaiyun.com/feifei_csdn/article/details/107332155
/**
* update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @env: The load balancing environment.
* @sds: variable to hold the statistics for this sched_domain.
*/
static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
{
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
struct sg_lb_stats *local = &sds->local_stat;
struct sg_lb_stats tmp_sgs;
int load_idx;
bool overload = false, overutilized = false, misfit_task = false;
bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
#ifdef CONFIG_NO_HZ_COMMON//一般配置的都是true
if (env->idle == CPU_NEWLY_IDLE) {
int cpu;
/* Update the stats of NOHZ idle CPUs in the sd */
for_each_cpu_and(cpu, sched_domain_span(env->sd),
nohz.idle_cpus_mask) {
struct rq *rq = cpu_rq(cpu);
/* ... Unless we've already done since the last tick */
// a在b的后面
if (time_after(jiffies, rq->last_blocked_load_update_tick))
update_blocked_averages(cpu);
}
}
/*
* If we've just updated all of the NOHZ idle CPUs, then we can push
* back the next nohz.next_update, which will prevent an unnecessary
* wakeup for the nohz stats kick
*/
if (cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd)))
nohz.next_update = jiffies + LOAD_AVG_PERIOD;
#endif
//load_idx 实际上在sd的rq->cpu_load[load_idx-1]
load_idx = get_sd_load_idx(env->sd, env->idle);
/* (7.3.1.2) 逐个轮询本层级sched_group链表中的每个sched_group */
do {
struct sg_lb_stats *sgs = &tmp_sgs;
int local_group;
/* (7.3.1.3) 如果sg是当前cpu所在的sg,则本sg称为local_group