update_load_avg
/* Update task and its cfs_rq load average */
static inline void update_load_avg(struct sched_entity *se, int flags)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);	/* the cfs_rq this entity is attached to (se->cfs_rq) */
	u64 now = cfs_rq_clock_task(cfs_rq);
	struct rq *rq = rq_of(cfs_rq);
	int cpu = cpu_of(rq);
	int decayed;

	/*
	 * Track task load average for carrying it to new CPU after migrated, and
	 * track group sched_entity load average for task_h_load calc in migration
	 */
	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
		__update_load_avg_se(now, cpu, cfs_rq, se);

	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
	decayed |= propagate_entity_load_avg(se);

	if (decayed && (flags & UPDATE_TG))
		update_tg_load_avg(cfs_rq, 0);
}
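The flags argument gates the two optional steps above: ageing of the entity's own signal and propagation into the task group. Below is a minimal user-space sketch of that gating, runnable on its own; the helpers are stand-ins for the real PELT routines, and only the control flow and the UPDATE_TG/SKIP_AGE_LOAD flag values mirror the kernel:

/* Illustrative sketch only: stand-in helpers, not the kernel functions. */
#include <stdio.h>

#define UPDATE_TG	0x1
#define SKIP_AGE_LOAD	0x2

struct sched_avg    { unsigned long long last_update_time; };
struct sched_entity { struct sched_avg avg; };

static void age_entity(struct sched_entity *se)
{
	(void)se;
	puts("  __update_load_avg_se: age the entity's own signal");
}

static int decay_cfs_rq(void)
{
	puts("  update_cfs_rq_load_avg: decay the runqueue sums");
	return 1;	/* pretend something decayed */
}

static void update_tg(void)
{
	puts("  update_tg_load_avg: refresh the group contribution");
}

static void update_load_avg(struct sched_entity *se, int flags)
{
	int decayed;

	/* Skip ageing for freshly attached entities (last_update_time == 0)
	 * and for callers that pass SKIP_AGE_LOAD. */
	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
		age_entity(se);

	decayed = decay_cfs_rq();

	/* The group load is only touched when something decayed AND the
	 * caller asked for it with UPDATE_TG. */
	if (decayed && (flags & UPDATE_TG))
		update_tg();
}

int main(void)
{
	struct sched_entity se = { .avg = { .last_update_time = 1024 } };

	puts("enqueue path, UPDATE_TG:");
	update_load_avg(&se, UPDATE_TG);

	puts("attach path, SKIP_AGE_LOAD:");
	update_load_avg(&se, SKIP_AGE_LOAD);
	return 0;
}

Note that a freshly migrated entity has last_update_time == 0, so its first update after attach skips ageing even without SKIP_AGE_LOAD.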
__update_load_avg_se
Updates the entity's load average; the core of the work is the call down to the ___update_load_avg() function.
static int
__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (___update_load_avg(now, cpu, &se->avg,
			       se->on_rq * scale_load_down(se->load.weight),
			       cfs_rq->curr == se, NULL, 0, 0)) {
		trace_sched_load_se(se);
		cfs_se_util_change(&se->avg);

#ifdef UTIL_EST_DEBUG
		/*
		 * Trace utilization only for actual tasks.
		 *
		 * These trace events are mostly useful to get easier to
		 * read plots for the estimated utilization, where we can
		 * compare it with the actual grow/decrease of the original
		 * PELT signal.
		 * Let's keep them disabled by default in "production kernels".
		 */
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			trace_sched_util_est_task(tsk, &se->avg);
			/* Trace utilization only for top level CFS RQ */
			cfs_rq = &(task_rq(tsk)->cfs);
			trace_sched_util_est_cpu(cpu, cfs_rq);
		}
#endif /* UTIL_EST_DEBUG */

		return 1;
	}

	return 0;
}
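Both paths end in ___update_load_avg(), whose core is the PELT geometric series: time is chopped into 1024 us periods, and a period that happened n periods ago is weighted by y^n, with y fixed so that y^32 = 1/2 (the signal halves every ~32 ms). The following is a floating-point sketch of that series; the kernel itself uses precomputed fixed-point tables (runnable_avg_yN_inv, LOAD_AVG_MAX) rather than pow():

/* Floating-point sketch of the PELT running sum. Illustration only. */
#include <stdio.h>
#include <math.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* y^32 == 0.5 */
	double sum = 0.0;
	int p;

	/*
	 * A task that runs continuously: every 1024us period the old sum
	 * decays by y and the fresh period contributes 1024. The limit is
	 * 1024 / (1 - y), which the kernel's fixed-point arithmetic rounds
	 * to LOAD_AVG_MAX = 47742.
	 */
	for (p = 0; p < 2000; p++)
		sum = sum * y + 1024.0;
	printf("always-running limit: %.0f\n", sum);	/* ~47788 */

	/* Once the task sleeps, the same sum simply decays each period:
	 * 32 idle periods (~32 ms) halve it. */
	printf("after 32 idle periods: %.0f\n", sum * pow(y, 32.0));
	return 0;
}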