update_blocked_averages
static void update_blocked_averages(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct cfs_rq *cfs_rq;
	struct rq_flags rf;
	const struct sched_class *curr_class;

	rq_lock_irqsave(rq, &rf);
	update_rq_clock(rq);

	/*
	 * Iterates the task_group tree in a bottom up fashion, see
	 * list_add_leaf_cfs_rq() for details.
	 */
	// Walk every leaf cfs_rq on this runqueue.
	for_each_leaf_cfs_rq(rq, cfs_rq) {
		struct sched_entity *se;

		/* throttled entities do not contribute to load */
		if (throttled_hierarchy(cfs_rq))
			continue;

		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
			update_tg_load_avg(cfs_rq, 0);

		/* Propagate pending load changes to the parent, if any: */
		se = cfs_rq->tg->se[cpu];
		if (se && !skip_blocked_update(se))
			update_load_avg(se, 0); // refresh this entity's load average
	}
	// Also refresh/decay the rt and irq PELT signals of this runqueue.
	curr_class = rq->curr->sched_class;
	update_rt_rq_load_avg(rq_clock_task(rq), cpu, &rq->rt,
			      curr_class == &rt_sched_class);
	update_irq_load_avg(rq, 0);

#ifdef CONFIG_NO_HZ_COMMON
	// Record when blocked load was last refreshed, used by NOHZ idle balancing.
	rq->last_blocked_load_update_tick = jiffies;
#endif
	rq_unlock_irqrestore(rq, &rf);
}
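For intuition about why this periodic walk is needed at all: the blocked load tracked by PELT keeps decaying geometrically even while its tasks stay asleep, so something has to re-evaluate it on behalf of an otherwise quiet CPU. A contribution is halved roughly every 32 periods of about 1024us. The user-space sketch below only illustrates that decay curve; it is not kernel code, it uses floating point instead of the kernel's fixed-point lookup table, and decay_blocked_load() is a made-up helper name.

#include <math.h>
#include <stdio.h>

/* Decay a load_avg value over 'periods' elapsed ~1024us PELT windows. */
static double decay_blocked_load(double load_avg, unsigned int periods)
{
	/* y is chosen so that y^32 == 0.5, i.e. a half-life of 32 periods */
	const double y = pow(0.5, 1.0 / 32.0);

	return load_avg * pow(y, periods);
}

int main(void)
{
	double load = 1024.0;	/* a fully busy entity that just blocked */
	unsigned int p;

	for (p = 0; p <= 128; p += 32)
		printf("after %3u periods: %7.2f\n",
		       p, decay_blocked_load(load, p));
	return 0;
}

Compile with gcc and -lm; the printed values halve every 32 periods (1024, 512, 256, 128, 64), which is the behaviour update_cfs_rq_load_avg() has to keep applying to blocked entities so that stale load does not skew load-balancing decisions indefinitely.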