【内核调度、负载计算】【update_load_avg】

update_load_avg

/* Update task and its cfs_rq load average */
/*
 * Refresh the PELT load averages of a sched_entity and of the cfs_rq
 * it belongs to, then propagate the result up the task-group hierarchy
 * when anything actually decayed.
 *
 * Keeping the per-task average up to date lets the scheduler carry the
 * load to a new CPU on migration; keeping the group entity average up
 * to date feeds the task_h_load() calculation used during migration.
 */
static inline void update_load_avg(struct sched_entity *se, int flags)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);	/* runqueue owning this entity */
	struct rq *rq = rq_of(cfs_rq);
	int cpu = cpu_of(rq);
	u64 now = cfs_rq_clock_task(cfs_rq);	/* task-clock time base for PELT */
	int decayed = 0;

	/*
	 * Age the entity's own average first, unless it has never been
	 * initialised (last_update_time == 0, e.g. a freshly migrated
	 * task) or the caller explicitly asked to skip the aging step.
	 */
	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
		__update_load_avg_se(now, cpu, cfs_rq, se);

	/* Both calls must run; accumulate whether either one decayed. */
	decayed |= update_cfs_rq_load_avg(now, cfs_rq);
	decayed |= propagate_entity_load_avg(se);

	/* Only bother updating the task group's load_avg on real change. */
	if ((flags & UPDATE_TG) && decayed)
		update_tg_load_avg(cfs_rq, 0);
}

__update_load_avg_se

更新 entity 的 load avg,其核心是调用 ___update_load_avg 函数

static int
__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (___update_load_avg(now, cpu, &se->avg,
			       se->on_rq * scale_load_down(se->load.weight),
			       cfs_rq->curr == se, NULL, 0, 0)) {
		trace_sched_load_se(se);
		cfs_se_util_change(&se->avg);

#ifdef UTIL_EST_DEBUG
		/*
		 * Trace utilization only for actual tasks.
		 *
		 * These trace events are mostly useful to get easier to
		 * read plots for the estimated utilization, where we can
		 * compare it with the actual grow/decrease of the original
		 * PELT signal.
		 * Let's keep them disabled by default in "production kernels".
		 */
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			trace_sched_util_est_task(tsk, &se->avg);

			/* Trace utilization only for top le
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值