免费注册 查看新帖 |

Chinaunix

  平台 论坛 博客 文库
最近访问板块 发新帖
查看: 34269 | 回复: 4
打印 上一主题 下一主题

工作队列分析 [复制链接]

论坛徽章:
0
跳转到指定楼层
1 [收藏(0)] [报告]
发表于 2007-01-08 18:26 |只看该作者 |倒序浏览
一、用法
/*
 * Per-CPU half of a workqueue.  Each online CPU gets one of these,
 * paired with a dedicated worker thread that drains `worklist`.
 */
struct cpu_workqueue_struct {

        spinlock_t lock;        /* protects worklist and the sequence counters */

        long remove_sequence;        /* Least-recently added (next to run) */
        long insert_sequence;        /* Next to add */

        struct list_head worklist;        /* pending work items, FIFO order */
        wait_queue_head_t more_work;        /* worker sleeps here until work is queued */
        wait_queue_head_t work_done;        /* woken after each item runs (presumably for flush waiters -- not visible here) */

        struct workqueue_struct *wq;        /* owning workqueue */
        struct task_struct *thread;        /* this CPU's worker thread */

        int run_depth;                /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;        /* per-CPU queues (alloc_percpu) */
        const char *name;        /* base name for the worker threads */
        struct list_head list;         /* Empty if single thread */
};

工作队列的使用很简单。
1.首先是建立一个工作队列:
        struct workqueue_struct *keventd_wq;
        keventd_wq = create_workqueue("events");
2.然后就是在这个队列中插入（insert）你所要做的“工作”：
        DECLARE_WORK(work, func, data);
        queue_work(keventd_wq, &work);

/*
 * One unit of deferred work: `func` will be called with `data` by a
 * workqueue's worker thread.
 */
struct work_struct {
        unsigned long pending;        /* bit 0 set while queued (see queue_work) */
        struct list_head entry;        /* link into cpu_workqueue_struct.worklist */
        void (*func)(void *);        /* function to execute */
        void *data;        /* argument passed to func */
        void *wq_data;        /* back-pointer to the owning cpu_workqueue_struct */
        struct timer_list timer;        /* presumably for delayed-work variants -- not shown in this excerpt */
};

初始化有两种方法。
一种为静态方法:
/*
 * Static initialization: DECLARE_WORK() defines and initializes a
 * work_struct in one step.  `entry` points at itself, i.e. the item
 * starts out on an empty (self-linked) list.
 */
#define __WORK_INITIALIZER(n, f, d) {                                \
        .entry        = { &(n).entry, &(n).entry },                        \
        .func = (f),                                                \
        .data = (d),                                                \
        .timer = TIMER_INITIALIZER(NULL, 0, 0),                        \
        }

#define DECLARE_WORK(n, f, d)                                        \
        struct work_struct n = __WORK_INITIALIZER(n, f, d)

另一种为动态方法:
/*
 * initialize all of a work-struct:
 * runtime (dynamic) equivalent of DECLARE_WORK, usable on a
 * work_struct embedded in already-allocated memory.
 */
#define INIT_WORK(_work, _func, _data)                                \
        do {                                                        \
                INIT_LIST_HEAD(&(_work)->entry);                \
                (_work)->pending = 0;                                \
                PREPARE_WORK((_work), (_func), (_data));        \
                init_timer(&(_work)->timer);                        \
        } while (0)


二、执行过程
create_workqueue() -> __create_workqueue()

// Build a workqueue: allocate the descriptor plus one
// cpu_workqueue_struct per CPU and, in the multi-threaded case, spawn
// and wake one worker thread per online CPU.
// (Error handling and the single-threaded branch are elided here.)
struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        // allocate one cpu_workqueue_struct for every CPU
        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        ...
        wq->name = name;
        mutex_lock(&workqueue_mutex);
        if (singlethread) {
                ...
        } else {
                list_add(&wq->list, &workqueues);
                for_each_online_cpu(cpu) {
                        // create one worker thread for this CPU ...
                        p = create_workqueue_thread(wq, cpu);
                        if (p) {
                                kthread_bind(p, cpu);
                                // ... pin it to the CPU and wake it up
                                wake_up_process(p);
                        } else
                                destroy = 1;  // thread creation failed
                }
        }
        mutex_unlock(&workqueue_mutex);
        ...
        return wq;
}

create_workqueue() -> __create_workqueue() -> create_workqueue_thread()

// Initialize one CPU's cpu_workqueue_struct and create (but do not yet
// wake) the worker thread that will service it.  Returns the thread, or
// NULL on failure.
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                // create a kernel thread that runs worker_thread(cwq),
                // named "<wqname>/<cpu>" (e.g. "events/0")
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;  // publish the worker thread for this CPU
        return p;
}


create_workqueue() -> __create_workqueue() -> create_workqueue_thread() -> worker_thread()

// Worker-thread main loop: normally asleep, it waits on cwq->more_work
// until queue_work() wakes it, then drains the worklist via
// run_workqueue(), and repeats until kthread_should_stop().
static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        ...

        current->flags |= PF_NOFREEZE;

        // slightly boost priority so queued work runs promptly
        set_user_nice(current, -5);
        ...
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                // register on the wait queue so queue_work() can wake us
                add_wait_queue(&cwq->more_work, &wait);

                // no pending work: yield the CPU and sleep until woken
                if (list_empty(&cwq->worklist))
                        schedule();
                else // work already pending (or we were just woken): keep running
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                // queue is non-empty: execute the pending work items
                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                // mark ourselves sleepy *before* the next emptiness check
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}


        create_workqueue() -> __create_workqueue() -> create_workqueue_thread()
-> worker_thread() -> run_workqueue()
// Pop and execute every work item currently on cwq's worklist.  The
// spinlock is dropped around the call to each work function, so work
// functions may sleep and may re-queue themselves.
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        ...
        // execute all queued work items, one after the other (FIFO)
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                void (*f) (void *) = work->func;
                void *data = work->data;

                // unlink the item to be executed from the queue
                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(work->wq_data != cwq);
                // clear the pending bit: from here on queue_work() may
                // legitimately queue this item again
                clear_bit(0, &work->pending);

                // run the work function (lock not held)
                f(data);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
                wake_up(&cwq->work_done);  // wake work_done waiters (presumably a flush -- waiters not shown here)
        }
        cwq->run_depth--;  // matching increment is in the elided code above
        spin_unlock_irqrestore(&cwq->lock, flags);
}

三、工作线程创建的详细过程
   create_workqueue() -> __create_workqueue() -> create_workqueue_thread()
-> kthread_create()

// Create a kernel thread that will run threadfn(data).  The creation
// itself is packaged as a work item (keventd_create_kthread) and, once
// the helper workqueue exists, executed on helper_wq.
struct task_struct *kthread_create(int (*threadfn)(void *data),
                                   void *data,
                                   const char namefmt[],
                                   ...)
{
        // helper structure describing the thread to be created
        struct kthread_create_info create;
        DECLARE_WORK(work, keventd_create_kthread, &create);
        create.threadfn = threadfn;
        create.data = data;
        init_completion(&create.started);
        init_completion(&create.done);

        if (!helper_wq) // helper workqueue not up yet: run the work synchronously
                work.func(work.data);
        else {
                // Note: "create a thread" is itself a work item belonging
                // to the helper_wq workqueue, so queue it there and wait
                // for it to complete.
                queue_work(helper_wq, &work);
                wait_for_completion(&create.done);
        }
        ...
        return create.result;
}


        create_workqueue() -> __create_workqueue() -> create_workqueue_thread()
-> kthread_create()-> keventd_create_kthread()
// Eventually forks the kernel thread via kernel_thread().  The new
// thread runs kthread() with `create` as its argument; kthread() in
// turn calls create->threadfn(create->data) -- for workqueues that is
// worker_thread(cwq), i.e. the function our workqueue executes.
static void keventd_create_kthread(void *_create)
{
        struct kthread_create_info *create = _create;
        int pid;

        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                create->result = ERR_PTR(pid);  // report failure to kthread_create()
        } else {
                // wait until the new thread has actually started, then
                // look up its task_struct to hand back to the caller
                wait_for_completion(&create->started);
                read_lock(&tasklist_lock);
                create->result = find_task_by_pid(pid);
                read_unlock(&tasklist_lock);
        }
        complete(&create->done);  // unblock kthread_create()
}

// Entry point of every thread made via kthread_create(): unpack the
// creation info and run the requested thread function.
static int kthread(void *_create)
{
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data);
        void *data;
        ...
        threadfn = create->threadfn;
        data = create->data;
        ...
        if (!kthread_should_stop())
                ret = threadfn(data);  // e.g. worker_thread(cwq)
        ...
        return 0;
}

四、插入“工作”
/* Preempt must be disabled. */
// Append `work` to cwq's worklist under the lock and wake the worker.
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        work->wq_data = cwq;  // remember which per-CPU queue owns this item
        // append the work item to the tail of the pending list
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);  // wake this CPU's worker thread
        spin_unlock_irqrestore(&cwq->lock, flags);
}


// Queue `work` on the current CPU's queue of `wq`.
// Returns 1 if queued, 0 if the item was already pending.
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        // only insert if not already queued: bit 0 of `pending` acts as
        // an "on a worklist" flag and test_and_set makes the check atomic
        if (!test_and_set_bit(0, &work->pending)) {
                ...
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}

论坛徽章:
0
2 [报告]
发表于 2010-03-13 15:16 |只看该作者

论坛徽章:
0
3 [报告]
发表于 2011-01-20 11:18 |只看该作者

论坛徽章:
0
4 [报告]
发表于 2011-01-20 12:11 |只看该作者
兄弟你这个已经是旧版的内核workqueue代码了，2.6.36里面已经完全调整过了。

Concurrency Managed Workqueue (cmwq)

September, 2010                Tejun Heo <tj@kernel.org>
                        Florian Mickler <florian@mickler.org>

CONTENTS

1. Introduction
2. Why cmwq?
3. The Design
4. Application Programming Interface (API)
5. Example Execution Scenarios
6. Guidelines


1. Introduction

There are many cases where an asynchronous process execution context
is needed and the workqueue (wq) API is the most commonly used
mechanism for such cases.

When such an asynchronous execution context is needed, a work item
describing which function to execute is put on a queue.  An
independent thread serves as the asynchronous execution context.  The
queue is called workqueue and the thread is called worker.

While there are work items on the workqueue the worker executes the
functions associated with the work items one after the other.  When
there is no work item left on the workqueue the worker becomes idle.
When a new work item gets queued, the worker begins executing again.


2. Why cmwq?

In the original wq implementation, a multi threaded (MT) wq had one
worker thread per CPU and a single threaded (ST) wq had one worker
thread system-wide.  A single MT wq needed to keep around the same
number of workers as the number of CPUs.  The kernel grew a lot of MT
wq users over the years and with the number of CPU cores continuously
rising, some systems saturated the default 32k PID space just booting
up.

Although MT wq wasted a lot of resource, the level of concurrency
provided was unsatisfactory.  The limitation was common to both ST and
MT wq albeit less severe on MT.  Each wq maintained its own separate
worker pool.  A MT wq could provide only one execution context per CPU
while a ST wq one for the whole system.  Work items had to compete for
those very limited execution contexts leading to various problems
including proneness to deadlocks around the single execution context.

The tension between the provided level of concurrency and resource
usage also forced its users to make unnecessary tradeoffs like libata
choosing to use ST wq for polling PIOs and accepting an unnecessary
limitation that no two polling PIOs can progress at the same time.  As
MT wq don't provide much better concurrency, users which require
higher level of concurrency, like async or fscache, had to implement
their own thread pool.

Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with
focus on the following goals.

* Maintain compatibility with the original workqueue API.

* Use per-CPU unified worker pools shared by all wq to provide
  flexible level of concurrency on demand without wasting a lot of
  resource.

* Automatically regulate worker pool and level of concurrency so that
  the API users don't need to worry about such details.


3. The Design

In order to ease the asynchronous execution of functions a new
abstraction, the work item, is introduced.

A work item is a simple struct that holds a pointer to the function
that is to be executed asynchronously.  Whenever a driver or subsystem
wants a function to be executed asynchronously it has to set up a work
item pointing to that function and queue that work item on a
workqueue.

Special purpose threads, called worker threads, execute the functions
off of the queue, one after the other.  If no work is queued, the
worker threads become idle.  These worker threads are managed in so
called thread-pools.

The cmwq design differentiates between the user-facing workqueues that
subsystems and drivers queue work items on and the backend mechanism
which manages thread-pool and processes the queued work items.

The backend is called gcwq.  There is one gcwq for each possible CPU
and one gcwq to serve work items queued on unbound workqueues.

Subsystems and drivers can create and queue work items through special
workqueue API functions as they see fit. They can influence some
aspects of the way the work items are executed by setting flags on the
workqueue they are putting the work item on. These flags include
things like CPU locality, reentrancy, concurrency limits and more. To
get a detailed overview refer to the API description of
alloc_workqueue() below.

When a work item is queued to a workqueue, the target gcwq is
determined according to the queue parameters and workqueue attributes
and appended on the shared worklist of the gcwq.  For example, unless
specifically overridden, a work item of a bound workqueue will be
queued on the worklist of exactly that gcwq that is associated to the
CPU the issuer is running on.

For any worker pool implementation, managing the concurrency level
(how many execution contexts are active) is an important issue.  cmwq
tries to keep the concurrency at a minimal but sufficient level.
Minimal to save resources and sufficient in that the system is used at
its full capacity.

Each gcwq bound to an actual CPU implements concurrency management by
hooking into the scheduler.  The gcwq is notified whenever an active
worker wakes up or sleeps and keeps track of the number of the
currently runnable workers.  Generally, work items are not expected to
hog a CPU and consume many cycles.  That means maintaining just enough
concurrency to prevent work processing from stalling should be
optimal.  As long as there are one or more runnable workers on the
CPU, the gcwq doesn't start execution of a new work, but, when the
last running worker goes to sleep, it immediately schedules a new
worker so that the CPU doesn't sit idle while there are pending work
items.  This allows using a minimal number of workers without losing
execution bandwidth.

Keeping idle workers around doesn't cost other than the memory space
for kthreads, so cmwq holds onto idle ones for a while before killing
them.

For an unbound wq, the above concurrency management doesn't apply and
the gcwq for the pseudo unbound CPU tries to start executing all work
items as soon as possible.  The responsibility of regulating
concurrency level is on the users.  There is also a flag to mark a
bound wq to ignore the concurrency management.  Please refer to the
API section for details.

Forward progress guarantee relies on that workers can be created when
more execution contexts are necessary, which in turn is guaranteed
through the use of rescue workers.  All work items which might be used
on code paths that handle memory reclaim are required to be queued on
wq's that have a rescue-worker reserved for execution under memory
pressure.  Else it is possible that the thread-pool deadlocks waiting
for execution contexts to free up.


4. Application Programming Interface (API)

alloc_workqueue() allocates a wq.  The original create_*workqueue()
functions are deprecated and scheduled for removal.  alloc_workqueue()
takes three arguments - @name, @flags and @max_active.  @name is the
name of the wq and also used as the name of the rescuer thread if
there is one.

A wq no longer manages execution resources but serves as a domain for
forward progress guarantee, flush and work item attributes.  @flags
and @max_active control how work items are assigned execution
resources, scheduled and executed.

@flags:

  WQ_NON_REENTRANT

        By default, a wq guarantees non-reentrance only on the same
        CPU.  A work item may not be executed concurrently on the same
        CPU by multiple workers but is allowed to be executed
        concurrently on multiple CPUs.  This flag makes sure
        non-reentrance is enforced across all CPUs.  Work items queued
        to a non-reentrant wq are guaranteed to be executed by at most
        one worker system-wide at any given time.

  WQ_UNBOUND

        Work items queued to an unbound wq are served by a special
        gcwq which hosts workers which are not bound to any specific
        CPU.  This makes the wq behave as a simple execution context
        provider without concurrency management.  The unbound gcwq
        tries to start execution of work items as soon as possible.
        Unbound wq sacrifices locality but is useful for the following
        cases.

        * Wide fluctuation in the concurrency level requirement is
          expected and using bound wq may end up creating large number
          of mostly unused workers across different CPUs as the issuer
          hops through different CPUs.

        * Long running CPU intensive workloads which can be better
          managed by the system scheduler.

  WQ_FREEZEABLE

        A freezeable wq participates in the freeze phase of the system
        suspend operations.  Work items on the wq are drained and no
        new work item starts execution until thawed.

  WQ_RESCUER

        All wq which might be used in the memory reclaim paths _MUST_
        have this flag set.  This reserves one worker exclusively for
        the execution of this wq under memory pressure.

  WQ_HIGHPRI

        Work items of a highpri wq are queued at the head of the
        worklist of the target gcwq and start execution regardless of
        the current concurrency level.  In other words, highpri work
        items will always start execution as soon as execution
        resource is available.

        Ordering among highpri work items is preserved - a highpri
        work item queued after another highpri work item will start
        execution after the earlier highpri work item starts.

        Although highpri work items are not held back by other
        runnable work items, they still contribute to the concurrency
        level.  Highpri work items in runnable state will prevent
        non-highpri work items from starting execution.

        This flag is meaningless for unbound wq.

  WQ_CPU_INTENSIVE

        Work items of a CPU intensive wq do not contribute to the
        concurrency level.  In other words, runnable CPU intensive
        work items will not prevent other work items from starting
        execution.  This is useful for bound work items which are
        expected to hog CPU cycles so that their execution is
        regulated by the system scheduler.

        Although CPU intensive work items don't contribute to the
        concurrency level, start of their executions is still
        regulated by the concurrency management and runnable
        non-CPU-intensive work items can delay execution of CPU
        intensive work items.

        This flag is meaningless for unbound wq.

  WQ_HIGHPRI | WQ_CPU_INTENSIVE

        This combination makes the wq avoid interaction with
        concurrency management completely and behave as a simple
        per-CPU execution context provider.  Work items queued on a
        highpri CPU-intensive wq start execution as soon as resources
        are available and don't affect execution of other work items.

@max_active:

@max_active determines the maximum number of execution contexts per
CPU which can be assigned to the work items of a wq.  For example,
with @max_active of 16, at most 16 work items of the wq can be
executing at the same time per CPU.

Currently, for a bound wq, the maximum limit for @max_active is 512
and the default value used when 0 is specified is 256.  For an unbound
wq, the limit is higher of 512 and 4 * num_possible_cpus().  These
values are chosen sufficiently high such that they are not the
limiting factor while providing protection in runaway cases.

The number of active work items of a wq is usually regulated by the
users of the wq, more specifically, by how many work items the users
may queue at the same time.  Unless there is a specific need for
throttling the number of active work items, specifying '0' is
recommended.

Some users depend on the strict execution ordering of ST wq.  The
combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
behavior.  Work items on such wq are always queued to the unbound gcwq
and only one work item can be active at any given time thus achieving
the same ordering property as ST wq.


5. Example Execution Scenarios

The following example execution scenarios try to illustrate how cmwq
behave under different configurations.

Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU.
w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms
again before finishing.  w1 and w2 burn CPU for 5ms then sleep for
10ms.

Ignoring all other tasks, works and processing overhead, and assuming
simple FIFO scheduling, the following is one highly simplified version
of possible sequences of events with the original wq.

TIME IN MSECS        EVENT
0                w0 starts and burns CPU
5                w0 sleeps
15                w0 wakes up and burns CPU
20                w0 finishes
20                w1 starts and burns CPU
25                w1 sleeps
35                w1 wakes up and finishes
35                w2 starts and burns CPU
40                w2 sleeps
50                w2 wakes up and finishes

And with cmwq with @max_active >= 3,

TIME IN MSECS        EVENT
0                w0 starts and burns CPU
5                w0 sleeps
5                w1 starts and burns CPU
10                w1 sleeps
10                w2 starts and burns CPU
15                w2 sleeps
15                w0 wakes up and burns CPU
20                w0 finishes
20                w1 wakes up and finishes
25                w2 wakes up and finishes

If @max_active == 2,

TIME IN MSECS        EVENT
0                w0 starts and burns CPU
5                w0 sleeps
5                w1 starts and burns CPU
10                w1 sleeps
15                w0 wakes up and burns CPU
20                w0 finishes
20                w1 wakes up and finishes
20                w2 starts and burns CPU
25                w2 sleeps
35                w2 wakes up and finishes

Now, let's assume w1 and w2 are queued to a different wq q1 which has
WQ_HIGHPRI set,

TIME IN MSECS        EVENT
0                w1 and w2 start and burn CPU
5                w1 sleeps
10                w2 sleeps
10                w0 starts and burns CPU
15                w0 sleeps
15                w1 wakes up and finishes
20                w2 wakes up and finishes
25                w0 wakes up and burns CPU
30                w0 finishes

If q1 has WQ_CPU_INTENSIVE set,

TIME IN MSECS        EVENT
0                w0 starts and burns CPU
5                w0 sleeps
5                w1 and w2 start and burn CPU
10                w1 sleeps
15                w2 sleeps
15                w0 wakes up and burns CPU
20                w0 finishes
20                w1 wakes up and finishes
25                w2 wakes up and finishes


6. Guidelines

* Do not forget to use WQ_RESCUER if a wq may process work items which
  are used during memory reclaim.  Each wq with WQ_RESCUER set has one
  rescuer thread reserved for it.  If there is dependency among
  multiple work items used during memory reclaim, they should be
  queued to separate wq each with WQ_RESCUER.

* Unless strict ordering is required, there is no need to use ST wq.

* Unless there is a specific need, using 0 for @max_active is
  recommended.  In most use cases, concurrency level usually stays
  well under the default limit.

* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
  flush and work item attributes.  Work items which are not involved
  in memory reclaim and don't need to be flushed as a part of a group
  of work items, and don't require any special attribute, can use one
  of the system wq.  There is no difference in execution
  characteristics between using a dedicated wq and a system wq.

* Unless work items are expected to consume a huge amount of CPU
  cycles, using a bound wq is usually beneficial due to the increased
  level of locality in wq operations and work item execution.

论坛徽章:
0
5 [报告]
发表于 2014-05-23 20:46 |只看该作者
这么长的英语
您需要登录后才可以回帖 登录 | 注册

本版积分规则 发表回复

  

北京盛拓优讯信息技术有限公司. 版权所有 京ICP备16024965号-6 北京市公安局海淀分局网监中心备案编号:11010802020122 niuxiaotong@pcpop.com 17352615567
未成年举报专区
中国互联网协会会员  联系我们:huangweiwei@itpub.net
感谢所有关心和支持过ChinaUnix的朋友们 转载本站内容请注明原作者名及出处

清除 Cookies - ChinaUnix - Archiver - WAP - TOP