struct cfs_rq
There is at least one CFS run queue in the system: the root CFS run queue. Every other task group and task hangs below it. The difference is that a task group has a CFS run queue of its own, which holds the scheduling entities belonging to that group. When the scheduler picks a group entity from the root CFS run queue, that group in turn picks a scheduling entity from its own CFS run queue (which may be a task or again a child group), and the walk keeps descending until an actual task is finally selected to run.
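To make that descent concrete, here is a rough sketch of the loop the fair scheduling class uses to walk from the root cfs_rq down to a task. It is simplified from pick_next_task_fair() (putting back the previous task, group throttling and the idle path are omitted), and the wrapper name is made up for illustration:

struct task_struct *pick_from_root(struct rq *rq) /* illustrative name, not a kernel function */
{
	struct cfs_rq *cfs_rq = &rq->cfs; /* the root CFS run queue of this CPU */
	struct sched_entity *se;

	do {
		/* leftmost entity by default, or one of the next/last/skip buddies */
		se = pick_next_entity(cfs_rq, cfs_rq->curr);
		/* group_cfs_rq() returns se->my_q, which is non-NULL only for a group se */
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq); /* keep descending while se is a group */

	return task_of(se); /* se now belongs to a task */
}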
In effect, cfs_rq is just the set of CFS-related fields inside a runqueue (struct rq).
/* CFS-related fields in a runqueue */
struct cfs_rq {
/*
 * The load of this cfs_rq: only the sum of the weights of the scheduling
 * entities enqueued directly on this level. It is neither the load of the
 * owning se nor a recursive sum of all se weights down to the leaf tasks
 * (understanding this distinction is important).
 */
struct load_weight load;
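/*
 * This is why load only covers this level: an se's weight is added when it is
 * enqueued on this cfs_rq and removed when it is dequeued. A sketch of what
 * account_entity_enqueue()/account_entity_dequeue() do with it:
 *
 *	update_load_add(&cfs_rq->load, se->load.weight);
 *	...
 *	update_load_sub(&cfs_rq->load, se->load.weight);
 */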
/*
 * nr_running: number of scheduling entities queued on this cfs_rq.
 * h_nr_running: only meaningful for groups; the hierarchical count, i.e. the
 * sum of nr_running over this cfs_rq and every cfs_rq below it.
 */
unsigned int nr_running, h_nr_running;
u64 exec_clock; /* total (physical) CPU time consumed by this cfs_rq, accumulated for this level only */
/*
 * Minimum (base) vruntime of this CFS queue, monotonically increasing.
 * It is used as the reference when adjusting an se's vruntime; it usually
 * tracks the smallest vruntime in the red-black tree, but is not necessarily
 * the smallest vruntime among all entities of this cfs_rq.
 *
 * It is updated in two cases (see the sketch below):
 * 1. when the accumulated runtime of the currently running task is updated;
 * 2. when a task leaves the queue (e.g. it sleeps or exits): if the vruntime
 *    of the remaining tasks is larger than min_vruntime, the value is advanced.
 */
u64 min_vruntime;
#ifndef CONFIG_64BIT
u64 min_vruntime_copy; /* copy used for lockless reads of the 64-bit value on 32-bit machines */
#endif
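/*
 * A sketch of how update_min_vruntime() advances min_vruntime (based on the
 * kernel code of this era, simplified; not a field of this struct):
 *
 *	u64 vruntime = cfs_rq->min_vruntime;
 *
 *	if (cfs_rq->curr)
 *		vruntime = cfs_rq->curr->vruntime;
 *
 *	if (cfs_rq->rb_leftmost) {
 *		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
 *						   struct sched_entity, run_node);
 *		vruntime = cfs_rq->curr ? min_vruntime(vruntime, se->vruntime)
 *					: se->vruntime;
 *	}
 *
 *	// never let min_vruntime go backwards
 *	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
 */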
struct rb_root tasks_timeline; /* root of the red-black tree of runnable entities, ordered by vruntime */
struct rb_node *rb_leftmost; /* cached leftmost node, i.e. the entity with the smallest vruntime */
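/*
 * The tree is keyed by vruntime; insertion compares entities with
 * entity_before(), which uses signed arithmetic so vruntime wraparound is
 * handled (a sketch of the helper as it appears in kernels of this era):
 *
 *	static inline int entity_before(struct sched_entity *a,
 *					struct sched_entity *b)
 *	{
 *		return (s64)(a->vruntime - b->vruntime) < 0;
 *	}
 *
 * The leftmost node is therefore the entity CFS wants to run next.
 */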
/*
 * 'curr' points to currently running entity on this cfs_rq.
 * It is set to NULL otherwise (i.e when none are currently running).
 *
 * curr: the currently running sched_entity. A group se never runs on a CPU
 *       itself, but when a task below it is running on the CPU, the cfs_rq
 *       that the group se is queued on treats it as its running entity.
 * next: an entity that urgently needs to run; at pick time CFS checks whether
 *       next should be scheduled and favours it even if strict fairness would
 *       choose another entity.
 * last: the entity that ran last (preferred so a preempted task can get the
 *       CPU back while its cache is still warm).
 * skip: an entity to skip (it will not be chosen at the next pick).
 */
struct sched_entity *curr, *next, *last, *skip;
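/*
 * How pick_next_entity() uses these buddies (a simplified sketch of the
 * kernel function in this era):
 *
 *	se = leftmost entity (or curr, if its vruntime is even smaller);
 *	if (se == cfs_rq->skip)
 *		se = second-leftmost entity, if that is not too unfair;
 *	if (cfs_rq->last && not too unfair)
 *		se = cfs_rq->last;	// give the CPU back to a preempted task
 *	if (cfs_rq->next && not too unfair)
 *		se = cfs_rq->next;	// someone really wants this to run
 *
 * "not too unfair" is decided by wakeup_preempt_entity(), which bounds how
 * far the candidate's vruntime may lag behind the leftmost entity's.
 */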
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over; /* debug statistic: how often the vruntime spread of this queue exceeded the allowed limit */
#endif
#ifdef CONFIG_SMP
/*
* CFS load tracking
*/
struct sched_avg avg; /* PELT load/util averages of this cfs_rq (blocked contributions included) */
u64 runnable_load_sum; /* decayed running sum for the entities currently enqueued here */
unsigned long runnable_load_avg; /* load_avg of the currently enqueued entities only (no blocked load) */
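/*
 * These averages follow the PELT (per-entity load tracking) scheme; an
 * illustrative formula rather than a field: the contribution of a period
 * that is i periods (~1ms each) old is decayed geometrically,
 *
 *	load_sum = sum_i u_i * y^i,	with y^32 = 1/2
 *
 * so the most recent 32 periods account for about half of the total weight.
 */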
#ifdef CONFIG_64BIT_ONLY_CPU
unsigned long runnable_load_avg_32bit;
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
unsigned long tg_load_avg_contrib; /* this cfs_rq's last published contribution to tg->load_avg */
unsigned long propagate_avg;
#endif
atomic_long_t removed_load_avg, removed_util_avg; /* load/util of entities that were migrated away, subtracted at the next update */
#ifndef CONFIG_64BIT
u64 load_last_update_time_copy; /* copy for lockless reads of avg.last_update_time on 32-bit machines */
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* h_load = weight * f(tg)
*
* Where f(tg) is the recursive weight fraction assigned to
* this group.
*/
unsigned long h_load;
u64 last_h_load_update;
struct sched_entity *h_load_next;
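/*
 * In other words (a sketch of what update_cfs_rq_h_load() computes;
 * h_load_next is the temporary downward link used during that walk):
 * starting from the root, each level scales its parent's h_load by this
 * level's share of the parent,
 *
 *	h_load(child cfs_rq) = h_load(parent cfs_rq) *
 *		load_avg(group se) / (load_avg(parent cfs_rq) + 1)
 */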
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */
#ifdef CONFIG_FAIR_GROUP_SCHED
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
/*
* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
* a hierarchy). Non-leaf lrqs hold other higher schedulable entities
* (like users, containers etc.)
*
* leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
* list is used during load balance.
*/
int on_list; /* is this cfs_rq currently linked on the per-cpu leaf_cfs_rq_list? */
struct list_head leaf_cfs_rq_list;
struct task_group *tg; /* group that "owns" this runqueue */