Related structures: global_cwq, cpu_workqueue_struct, workqueue_struct, work_struct, and worker
/* One such object exists per CPU, not one per workqueue.
 * Global per-cpu workqueue. There's one and only one for each cpu
 * and all works are queued and processed here regardless of their
 * target workqueues.
 */
struct global_cwq {
	spinlock_t lock;		/* the gcwq lock */
	struct list_head worklist;	/* L: list of pending works */
	unsigned int cpu;		/* I: the associated cpu */
	unsigned int flags;		/* L: GCWQ_* flags */
	int nr_workers;			/* L: total number of workers */
	int nr_idle;			/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head idle_list;	/* X: list of idle workers */
	struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
					/* L: hash of busy workers */

	struct timer_list idle_timer;	/* L: worker idle timeout */
	struct timer_list mayday_timer;	/* L: SOS timer for dworkers */
	struct ida worker_ida;		/* L: for worker IDs */

	struct task_struct *trustee;	/* L: for gcwq shutdown */
	unsigned int trustee_state;	/* L: trustee state */
	wait_queue_head_t trustee_wait;	/* trustee wait */
	struct worker *first_idle;	/* L: first idle worker */
} ____cacheline_aligned_in_smp;
struct cpu_workqueue_struct {
	struct global_cwq *gcwq;	/* I: the associated gcwq */
	struct workqueue_struct *wq;	/* I: the owning workqueue */
	int work_color;			/* L: current color */
	int flush_color;		/* L: flushing color */
	int nr_in_flight[WORK_NR_COLORS];
					/* L: nr of in_flight works */
	int nr_active;			/* L: nr of active works */
	int max_active;			/* L: max active works */
	struct list_head delayed_works;	/* L: delayed works */
};
/*
* The externally visible workqueue abstraction is an array of
* per-CPU workqueues:
*/
struct workqueue_struct {
	unsigned int flags;		/* W: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu *pcpu; /* per-CPU pointer: one cwq per possible CPU */
		struct cpu_workqueue_struct *single;
		unsigned long v;
	} cpu_wq;			/* I: cwq's */
	struct list_head list;		/* W: list of all workqueues */

	struct mutex flush_mutex;	/* protects wq flushing */
	int work_color;			/* F: current work color */
	int flush_color;		/* F: current flush color */
	atomic_t nr_cwqs_to_flush;	/* flush in progress */
	struct wq_flusher *first_flusher; /* F: first flusher */
	struct list_head flusher_queue;	/* F: flush waiters */
	struct list_head flusher_overflow; /* F: flush overflow list */

	mayday_mask_t mayday_mask;	/* cpus requesting rescue */
	struct worker *rescuer;		/* I: rescue worker */

	int nr_drainers;		/* W: drain in progress */
	int saved_max_active;		/* W: saved cwq max_active */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
	char name[];			/* I: workqueue name */
};
/*
* The poor guys doing the actual heavy lifting. All on-duty workers
* are either serving the manager role, on idle list or on busy hash.
*/
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head entry;		/* L: while idle */
		struct hlist_node hentry;	/* L: while busy */
	};

	struct work_struct *current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head scheduled;	/* L: scheduled works */
	struct task_struct *task;	/* I: worker task */
	struct global_cwq *gcwq;	/* I: the associated gcwq */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long last_active;	/* L: last active timestamp */
	unsigned int flags;		/* X: flags */
	int id;				/* I: worker id */
	struct work_struct rebind_work;	/* L: rebind worker to cpu */
};
struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};
typedef void (*work_func_t)(struct work_struct *work);
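A common usage pattern, shown here as a hypothetical sketch (my_dev and my_event_work are illustrative names, not from the kernel source above), is to embed a work_struct inside a larger object and recover the object with container_of() in the work function:

#include <linux/workqueue.h>
#include <linux/kernel.h>

/* hypothetical driver object embedding a work_struct */
struct my_dev {
	int pending_events;
	struct work_struct event_work;
};

static void my_event_work(struct work_struct *work)
{
	/* recover the enclosing my_dev from the embedded work_struct */
	struct my_dev *dev = container_of(work, struct my_dev, event_work);

	pr_info("my_dev: processing %d events\n", dev->pending_events);
}

/* in the driver's init path:  INIT_WORK(&dev->event_work, my_event_work);
 * in its interrupt handler:   schedule_work(&dev->event_work);          */

This is exactly the shape of the tty example used later in this note (&tty->buf.work).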
Creating the worker and worker thread
In kernel/workqueue.c: early_initcall(init_workqueues);
int __init init_workqueues(void)
{
	unsigned int cpu;

	/* initialize gcwqs: one global_cwq per possible CPU */
	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		INIT_LIST_HEAD(&gcwq->worklist);
		gcwq->cpu = cpu;
		gcwq->flags |= GCWQ_DISASSOCIATED;
	}

	/* create the initial worker */
	for_each_online_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;

		if (cpu != WORK_CPU_UNBOUND)
			gcwq->flags &= ~GCWQ_DISASSOCIATED;
		worker = create_worker(gcwq, true);
		BUG_ON(!worker);
		spin_lock_irq(&gcwq->lock);
		start_worker(worker);
		spin_unlock_irq(&gcwq->lock);
	}

	/* the system-wide workqueue_structs */
	system_wq = alloc_workqueue("events", 0, 0);
	system_long_wq = alloc_workqueue("events_long", 0, 0);
	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
					    WQ_UNBOUND_MAX_ACTIVE);
	system_freezable_wq = alloc_workqueue("events_freezable",
					      WQ_FREEZABLE, 0);
	system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
						  WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
	return 0;
}
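get_gcwq(), used above, resolves the per-cpu instance. In this era of kernel/workqueue.c it reads roughly as follows (quoted from memory, so treat as a sketch):

static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static struct global_cwq unbound_global_cwq;

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(global_cwq, cpu);
	else
		return &unbound_global_cwq;
}

So bound CPUs get their own static per-cpu gcwq, and all unbound work shares the single unbound_global_cwq.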
create_worker
create_worker() takes the global_cwq and a flag saying whether the worker is bound to a specific CPU (abridged):
{
	/* create the worker and the task backing it */
	worker = alloc_worker();
	worker->gcwq = gcwq;
	if (!on_unbound_cpu)
		worker->task = kthread_create_on_node(worker_thread,
						      worker,
						      cpu_to_node(gcwq->cpu),
						      "kworker/%u:%d", gcwq->cpu, id);
	else
		worker->task = kthread_create(worker_thread, worker,
					      "kworker/u:%d", id);
	return worker;
}
Bound to a specific CPU (named kworker/<cpu>:<id>):
23023 2 0 e34760a0 IN 0.0 0 0 [kworker/0:0]
26249 2 0 e4c599c0 IN 0.0 0 0 [kworker/0:3]
27167 2 0 e35fdb00 IN 0.0 0 0 [kworker/0:2]
27179 2 0 e44cb9e0 IN 0.0 0 0 [kworker/0:4]
28913 2 0 e068e9c0 IN 0.0 0 0 [kworker/0:1]
28993 2 1 ce5db660 IN 0.0 0 0 [kworker/1:1]
28996 2 1 e451b5a0 IN 0.0 0 0 [kworker/1:0]
Unbound (named kworker/u:<id>):
28499 2 0 e4711520 IN 0.0 0 0 [kworker/u:0]
27066 2 0 e69f2180 IN 0.0 0 0 [kworker/u:1]
26450 2 0 e068e040 IN 0.0 0 0 [kworker/u:2]
27168 2 0 d810e620 UN 0.0 0 0 [kworker/u:3]
27286 2 0 e2b2a660 IN 0.0 0 0 [kworker/u:4]
27288 2 0 e3782a60 IN 0.0 0 0 [kworker/u:5]
27351 2 0 e425a5a0 IN 0.0 0 0 [kworker/u:6]
27352 2 0 e5359a60 IN 0.0 0 0 [kworker/u:7]
27353 2 0 e362e9c0 IN 0.0 0 0 [kworker/u:8]
27354 2 0 e451ba60 IN 0.0 0 0 [kworker/u:9]
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->gcwq->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}
Creating a workqueue_struct
#define alloc_workqueue(fmt, flags, max_active, args...)	\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(fmt))				\
		__lock_name = (fmt);				\
	else							\
		__lock_name = #fmt;				\
								\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      &__key, __lock_name, ##args);	\
})
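The legacy creation interfaces are thin wrappers over alloc_workqueue() in this kernel era (quoted from memory, so treat as a sketch):

#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

WQ_MEM_RECLAIM is what causes the rescuer thread mentioned at the end of this note to be created.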
struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
					       unsigned int flags,
					       int max_active,
					       struct lock_class_key *key,
					       const char *lock_name, ...)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
	INIT_LIST_HEAD(&wq->list);

	/* set up the workqueue_struct and its per-CPU cpu_workqueue_structs */
	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = get_gcwq(cpu);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->gcwq = gcwq;
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		INIT_LIST_HEAD(&cwq->delayed_works);
	}
	list_add(&wq->list, &workqueues);
	return wq;
}
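From the caller's side, a minimal sketch (crypt_wq is a hypothetical name; passing max_active = 0 selects the default):

static struct workqueue_struct *crypt_wq;

static int __init crypt_init(void)
{
	/* a dedicated non-reentrant workqueue with default max_active */
	crypt_wq = alloc_workqueue("crypt_wq", WQ_NON_REENTRANT, 0);
	if (!crypt_wq)
		return -ENOMEM;
	return 0;
}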
From the creation paths of global_cwq, workqueue_struct, cpu_workqueue_struct and worker, it is clear that
global_cwq is the hub tying all of these structures together. Note that no work_struct has appeared yet.
worker_thread
/**
* worker_thread - the worker thread function
* @__worker: self
*
* The gcwq worker thread function. There's a single dynamic pool of
* these per each cpu. These workers process all works regardless of
* their specific target workqueue. The only exception is works which
* belong to workqueues with a rescuer which will be explained in
* rescuer_thread().
*/
need_more_worker
/*
* Need to wake up a worker? Called from anything but currently
* running workers.
*/
__need_more_worker() ultimately reads the per-cpu counter gcwq_nr_running:
static atomic_t *get_gcwq_nr_running(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(gcwq_nr_running, cpu);
	else
		return &unbound_gcwq_nr_running;
}
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
static bool need_more_worker(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
}
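For completeness, __need_more_worker() in the same kernel is simply a read of that counter (quoted from memory, so treat as a sketch):

static bool __need_more_worker(struct global_cwq *gcwq)
{
	return !atomic_read(get_gcwq_nr_running(gcwq->cpu));
}

A worker is therefore woken only when no worker of this gcwq is currently running, which is what keeps the number of concurrently running workers per CPU minimal.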
int worker_thread(void *__worker)
{
	/* the argument is the worker created in create_worker() */
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;

woke_up:
	/* no more worker necessary? */
	if (!need_more_worker(gcwq))
		goto sleep;

	do {
		/* take the first pending work_struct off the gcwq worklist */
		struct work_struct *work =
			list_first_entry(&gcwq->worklist,
					 struct work_struct, entry);

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	} while (keep_working(gcwq));

sleep:
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	schedule();
	goto woke_up;
}
void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}
static void process_one_work(struct worker *worker, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct global_cwq *gcwq = cwq->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
	work_func_t f = work->func;

	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	worker->current_cwq = cwq;

	trace_workqueue_execute_start(work);
	f(work);
	/*
	 * While we must be careful to not use "work" after this, the trace
	 * point will only record its address.
	 */
	trace_workqueue_execute_end(work);
}
/**************************************************************************************/
The exported interfaces
/**************************************************************************************/
Creating a work_struct (an example from the tty layer):
INIT_WORK(&tty->buf.work, flush_to_ldisc);
schedule_work(&tty->buf.work);
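Putting the two calls together, a minimal self-contained module sketch (all names hypothetical):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/sched.h>

static void hello_work_fn(struct work_struct *work)
{
	pr_info("hello_work_fn: running in process context as %s\n",
		current->comm);	/* typically a kworker/* thread */
}

/* DECLARE_WORK is the static counterpart of INIT_WORK */
static DECLARE_WORK(hello_work, hello_work_fn);

static int __init hello_init(void)
{
	schedule_work(&hello_work);	/* queue onto system_wq */
	return 0;
}

static void __exit hello_exit(void)
{
	cancel_work_sync(&hello_work);	/* ensure it is neither pending nor running */
}

module_init(hello_init);
module_exit(hello_exit);
MODULE_LICENSE("GPL");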
schedule_work
/* schedule_work() always queues onto the well-known system workqueue:
 * struct workqueue_struct *system_wq;  (system_wq = alloc_workqueue("events", 0, 0);)
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
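The delayed variant rides the same path once its timer fires; a hypothetical sketch:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* do the periodic job, then re-arm to run again in ~1s */
	schedule_delayed_work(&poll_work, HZ);
}

/* kick it off:   schedule_delayed_work(&poll_work, HZ);
 * tear it down:  cancel_delayed_work_sync(&poll_work);  */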
queue_work
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();
	return ret;
}
/* queue_work_on - queue work on a specific cpu */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
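A caller can also target a specific CPU directly; a hypothetical sketch (names are illustrative; as shown above, the return value is 1 if queued and 0 if the work was already pending):

#include <linux/workqueue.h>
#include <linux/smp.h>

static void stats_fn(struct work_struct *work)
{
	pr_info("stats_fn on cpu %d\n", raw_smp_processor_id());
}
static DECLARE_WORK(stats_work, stats_fn);

static void kick_stats(void)
{
	/* run stats_work in CPU 0's worker pool */
	if (!queue_work_on(0, system_wq, &stats_work))
		pr_debug("stats_work already pending\n");
}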
void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
		  struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;

	/* ... selection of the target gcwq elided ... */

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);

	/* from here on the workqueue_struct drops out of the picture;
	 * everything is expressed in terms of the cpu_workqueue_struct */
	insert_work(cwq, work, worklist, work_flags);
}
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* cpu_workqueue_struct -> global_cwq */
	struct global_cwq *gcwq = cwq->gcwq;

	list_add_tail(&work->entry, head);
	if (__need_more_worker(gcwq))
		wake_up_worker(gcwq);
}
Waking up the worker thread
static void wake_up_worker(struct global_cwq *gcwq)
{
	struct worker *worker = first_worker(gcwq);

	if (likely(worker))
		wake_up_process(worker->task);
}
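first_worker() just peeks at the gcwq idle list (quoted from memory, so treat as a sketch):

static struct worker *first_worker(struct global_cwq *gcwq)
{
	if (unlikely(list_empty(&gcwq->idle_list)))
		return NULL;

	return list_first_entry(&gcwq->idle_list, struct worker, entry);
}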
The queueing path runs:
work_struct -> workqueue_struct -> cpu_workqueue_struct -> global_cwq
Executing a work_struct traverses exactly the reverse direction:
global_cwq -> worklist -> work function.
The worker_thread sleeps until queue_work() adds a work_struct and wakes the process up.
create_workqueue() additionally spawns a new thread: kthread_create(rescuer_thread, wq, "%s", wq->name);
Summary:
On the relationships between the structures: many work_structs map onto a single workqueue_struct, and several processes are available to execute them; through the scheme above, each work_struct ends up running on a suitable process.
Workqueues thus provide a way to defer execution, with the deferred work running in process context.
The exported interfaces include INIT_WORK and schedule_work, which use the shared queue and workers: no matter how many work_structs are created through this path, they all go to the default workqueue, system_wq = alloc_workqueue("events", 0, 0);
a suitable process is then picked from the default workers to execute each queued work_struct.
There are also interfaces for creating a new workqueue and its own workers:
create_workqueue / queue_work.
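A minimal sketch of that private-workqueue path (hypothetical names; as noted above, create_workqueue() implies WQ_MEM_RECLAIM, which is why a rescuer thread is spawned):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	pr_info("my_work_fn: executed by a worker serving my_wq\n");
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_wq_init(void)
{
	my_wq = create_workqueue("my_wq");	/* also spawns the rescuer thread */
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);		/* private queue instead of system_wq */
	return 0;
}

static void __exit my_wq_exit(void)
{
	flush_workqueue(my_wq);			/* wait for queued work to finish */
	destroy_workqueue(my_wq);
}

module_init(my_wq_init);
module_exit(my_wq_exit);
MODULE_LICENSE("GPL");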