[Linux API] Linux work queues (workqueue)


1. Functional description

The workqueue mechanism in Linux exists to simplify the creation of kernel threads. By calling the workqueue interfaces you can create kernel threads, and the number of threads can scale with the number of CPUs in the system, so the work they process can run in parallel. The workqueue is a simple and effective kernel mechanism: it clearly simplifies the creation of kernel daemons and makes programming more convenient.

A workqueue is another way of deferring work for later execution. Work can be deferred and handed to a kernel thread, which means this bottom half runs in process context. Most importantly, work on a workqueue may be rescheduled and may even sleep.
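Because the handler runs in a worker thread's process context, blocking calls are legal inside it. A minimal sketch illustrating this (the handler name and the delay below are invented for the example, not taken from the kernel sources):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

/* Illustrative handler: sleeping here is fine because a worker thread
 * runs in process context -- a softirq or tasklet must never sleep. */
static void slow_handler(struct work_struct *work)
{
    msleep(100);    /* block for ~100 ms */
    pr_info("deferred work finished after sleeping\n");
}

static DECLARE_WORK(slow_work, slow_handler);

/* Later, e.g. from an interrupt handler: schedule_work(&slow_work); */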

2. Related structures and source code

A deferred task is called a work item; it corresponds to the following structure:

/* Reference: kernel/include/linux/workqueue.h */
struct work_struct {
    atomic_long_t data;       /* flags plus a parameter slot for the work function */
    struct list_head entry;   /* list node: links this work item into a queue */
    work_func_t func;         /* the handler: typedef void (*work_func_t)(struct work_struct *work); */
#ifdef CONFIG_LOCKDEP
    struct lockdep_map lockdep_map;
#endif
};
/* Reference: kernel/include/linux/workqueue.h */
struct delayed_work {
    struct work_struct work;
    struct timer_list timer;

    /* target workqueue and CPU ->timer uses to queue ->work */
    struct workqueue_struct *wq;
    int cpu;
};

Ways to create a work item:

Statically:

DECLARE_WORK(n, f)
DECLARE_DELAYED_WORK(n, f)

Dynamically:

INIT_WORK(struct work_struct *work, work_func_t func);
PREPARE_WORK(struct work_struct *work, work_func_t func);
INIT_DELAYED_WORK(struct delayed_work *work, work_func_t func);
PREPARE_DELAYED_WORK(struct delayed_work *work, work_func_t func);
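A hedged sketch of how the static and dynamic initializers pair with the queueing calls (the handler and variable names are invented; schedule_work()/schedule_delayed_work() submit to the kernel-global system_wq):

#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_handler(struct work_struct *work)
{
    pr_info("demo work item executed\n");
}

/* statically created: object and handler bound at compile time */
static DECLARE_WORK(demo_work, demo_handler);
static DECLARE_DELAYED_WORK(demo_dwork, demo_handler);

/* dynamically created: the object is initialized at run time */
static struct work_struct runtime_work;

static int __init demo_init(void)
{
    INIT_WORK(&runtime_work, demo_handler);

    schedule_work(&demo_work);               /* run as soon as possible */
    schedule_work(&runtime_work);
    schedule_delayed_work(&demo_dwork, HZ);  /* run after roughly one second */
    return 0;
}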

3. Terminology

workqueue: the queue on which all work items (work waiting to be executed) are placed.
worker thread: a kernel thread that executes the work items on a workqueue; when the workqueue is empty, the thread goes idle.
single threaded (ST): one flavor of worker thread: a single worker thread services the workqueue system-wide.
multi threaded (MT): one flavor of worker thread: on a multi-CPU system, one worker thread per CPU services the workqueue. (A sketch contrasting the two flavors follows this list.)
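With the legacy (pre-CMWQ) creation API the ST/MT choice is made when the queue is created; a sketch under that assumption (the queue names are made up):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *st_wq, *mt_wq;

static int __init wq_flavor_init(void)
{
    /* ST: a single worker thread services the queue system-wide */
    st_wq = create_singlethread_workqueue("demo_st");

    /* MT: historically one worker thread per CPU; on CMWQ kernels both
     * macros map onto alloc_workqueue() and shared worker pools */
    mt_wq = create_workqueue("demo_mt");

    if (!st_wq || !mt_wq)
        return -ENOMEM;
    return 0;
}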

4. Creation and usage

PART 1: Workqueues

work_handler // the function that does the actual work; each work item is removed from the workqueue once it has run. Its return type must be void and its only parameter must be of type struct work_struct *, e.g.:
void config_gpio_work(struct work_struct *work)
create_singlethread_workqueue(name) // returns a pointer to a struct workqueue_struct named name; when it is no longer needed, call destroy_workqueue(struct workqueue_struct *wq) to release its memory.

Typical flow for a workqueue_struct queue in ST mode:
INIT_WORK(&work, work_handler);   // initialize the struct work_struct
queue_work(queue, &work);         // put the work item on the queue

destroy_workqueue(queue);         // destroy the queue when finished

Hands-on code

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct workqueue_struct *mWorkqueue;

struct m_work {                      /* custom wrapper: private data travelling with the work */
    struct work_struct inner_work;   /* the actual work item */
    int gpio_num;                    /* parameter for the handler, stored by value: a pointer
                                        to a caller's local variable would dangle by the time
                                        the handler runs (work_struct->data can also carry it) */
};

void config_gpio_work(struct work_struct *work)   /* return type must be void */
{
    /* recover the custom wrapper from its embedded work_struct */
    struct m_work *mwork = container_of(work, struct m_work, inner_work);

    printk(KERN_INFO "the gpio got a value of =====workqueue==%d=\n", mwork->gpio_num);

    /* the handler owns the allocation: free it only after the work has run */
    kfree(mwork);
}

int handle_workqueue(int gpio_num)
{
    struct m_work *mwork;

    mwork = kzalloc(sizeof(struct m_work), GFP_ATOMIC);
    if (!mwork) {
        printk(KERN_INFO "=====kernel mwork initialization failed==========\n");
        return -1;
    }
    mwork->gpio_num = gpio_num;                       /* assign the parameter */
    INIT_WORK(&mwork->inner_work, config_gpio_work);  /* initialize the work item */

    if (mWorkqueue == NULL)
        mWorkqueue = create_singlethread_workqueue("joymine_workqueue");

    queue_work(mWorkqueue, &mwork->inner_work);
    return 0;
}

/* On module exit: destroy_workqueue(mWorkqueue) flushes any pending work and
 * releases the queue; set mWorkqueue = NULL afterwards -- a pointer that is no
 * longer used must be freed and then cleared to avoid a wild pointer. */

http://blog.csdn.net/myarrow/article/details/8090504
http://blog.csdn.net/brucexu1978/article/details/7173100

5. Problems encountered in practice and their analysis

A. Using the container_of(ptr, type, member) macro

/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr:        the pointer to the member.
 * @type:       the type of the container struct this is embedded in.
 * @member:     the name of the member within the struct.
 */
#define container_of(ptr, type, member) ({                      \
    const typeof( ((type *)0)->member ) *__mptr = (ptr);        \
    (type *)( (char *)__mptr - offsetof(type,member) );})

Its purpose is clear: given a pointer to one member field inside a structure variable, it yields a pointer to the whole containing structure. See the usage in the code above, and the small self-contained illustration below.
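A small self-contained illustration (the structure and field names are invented for this example):

struct wrapper {
    int id;
    struct work_struct inner;   /* embedded member */
};

static void wrapper_handler(struct work_struct *work)
{
    /* 'work' points at the 'inner' field; container_of() subtracts the
     * field's offset to recover a pointer to the enclosing wrapper */
    struct wrapper *w = container_of(work, struct wrapper, inner);

    pr_info("wrapper id = %d\n", w->id);
}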

B. On pointers: memory allocated dynamically (kmalloc/kzalloc in the kernel, the analogue of malloc) must be freed when done, and the pointer should then be set to NULL.
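A sketch of that discipline in kernel code (the helper name is invented):

static void release_mwork(struct m_work **pmwork)
{
    kfree(*pmwork);   /* kfree(NULL) is a no-op, so this is always safe */
    *pmwork = NULL;   /* clear the caller's pointer so later code cannot
                         dereference or double-free a stale address */
}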

PART 2: CMWQ (Concurrency Managed Workqueue)
The principles and usage are worth studying, when time allows, via the link below; a brief sketch follows it.
http://www.wowotech.net/irq_subsystem/workqueue.html
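Until then, a brief hedged sketch of the CMWQ-era entry point, alloc_workqueue(), which the create_*_workqueue() macros are thin wrappers around (the queue name and flag choice here are only an example):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *cm_wq;

static int __init cmwq_demo_init(void)
{
    /* an unbound, freezable queue allowing up to 4 in-flight work items;
     * under CMWQ the items are served by shared worker pools rather than
     * by threads owned by this particular queue */
    cm_wq = alloc_workqueue("cmwq_demo", WQ_UNBOUND | WQ_FREEZABLE, 4);
    if (!cm_wq)
        return -ENOMEM;
    return 0;
}

static void __exit cmwq_demo_exit(void)
{
    destroy_workqueue(cm_wq);   /* flushes pending work, then frees the queue */
}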

The header file is attached for reference:
kernel/include/linux/workqueue.h

/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
    WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
    WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
    WORK_STRUCT_PWQ_BIT     = 2,    /* data points to pwq */
    WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
    WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
    WORK_STRUCT_COLOR_SHIFT = 5,    /* color for workqueue flushing */
#else
    WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
#endif

    WORK_STRUCT_COLOR_BITS  = 4,

    WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
    WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
    WORK_STRUCT_PWQ     = 1 << WORK_STRUCT_PWQ_BIT,
    WORK_STRUCT_LINKED  = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
    WORK_STRUCT_STATIC  = 1 << WORK_STRUCT_STATIC_BIT,
#else
    WORK_STRUCT_STATIC  = 0,
#endif

    /*
     * The last color is no color used for works which don't
     * participate in workqueue flushing.
     */
    WORK_NR_COLORS      = (1 << WORK_STRUCT_COLOR_BITS) - 1,
    WORK_NO_COLOR       = WORK_NR_COLORS,

    /* special cpu IDs */
    WORK_CPU_UNBOUND    = NR_CPUS,
    WORK_CPU_END        = NR_CPUS + 1,

    /*
     * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
     * This makes pwqs aligned to 256 bytes and allows 15 workqueue
     * flush colors.
     */
    WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                              WORK_STRUCT_COLOR_BITS,

    /* data contains off-queue information when !WORK_STRUCT_PWQ */
    WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,

    __WORK_OFFQ_CANCELING   = WORK_OFFQ_FLAG_BASE,
    WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),

    /*
     * When a work item is off queue, its high bits point to the last
     * pool it was on.  Cap at 31 bits and use the highest number to
     * indicate that no pool is associated.
     */
    WORK_OFFQ_FLAG_BITS = 1,
    WORK_OFFQ_POOL_SHIFT    = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
    WORK_OFFQ_LEFT      = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
    WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
    WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,

    /* convenience constants */
    WORK_STRUCT_FLAG_MASK   = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
    WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
    WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

    /* bit mask for work_busy() return values */
    WORK_BUSY_PENDING   = 1 << 0,
    WORK_BUSY_RUNNING   = 1 << 1,

    /* maximum string length for set_worker_desc() */
    WORKER_DESC_LEN     = 24,
};

struct work_struct {
    atomic_long_t data;
    struct list_head entry;
    work_func_t func;
#ifdef CONFIG_LOCKDEP
    struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()    ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT() \
    ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)

struct delayed_work {
    struct work_struct work;
    struct timer_list timer;

    /* target workqueue and CPU ->timer uses to queue ->work */
    struct workqueue_struct *wq;
    int cpu;
};

/*
 * A struct for workqueue attributes.  This can be used to change
 * attributes of an unbound workqueue.
 *
 * Unlike other fields, ->no_numa isn't a property of a worker_pool.  It
 * only modifies how apply_workqueue_attrs() select pools and thus doesn't
 * participate in pool hash calculations or equality comparisons.
 */
struct workqueue_attrs {
    int             nice;       /* nice level */
    cpumask_var_t   cpumask;    /* allowed CPUs */
    bool            no_numa;    /* disable NUMA affinity */
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
    return container_of(work, struct delayed_work, work);
}

struct execute_work {
    struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
    .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {                  \
    .data = WORK_DATA_STATIC_INIT(),                \
    .entry  = { &(n).entry, &(n).entry },           \
    .func = (f),                                    \
    __WORK_INIT_LOCKDEP_MAP(#n, &(n))               \
    }

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {              \
    .work = __WORK_INITIALIZER((n).work, (f)),                  \
    .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,         \
                     0, (unsigned long)&(n),                    \
                     (tflags) | TIMER_IRQSAFE),                 \
    }

#define DECLARE_WORK(n, f)                                      \
    struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)                              \
    struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)                           \
    struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)                              \
    do {                                                        \
        (_work)->func = (_func);                                \
    } while (0)

#define PREPARE_DELAYED_WORK(_work, _func)                      \
    PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
    return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)                     \
    do {                                                        \
        static struct lock_class_key __key;                     \
                                                                \
        __init_work((_work), _onstack);                         \
        (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
        lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
        INIT_LIST_HEAD(&(_work)->entry);                        \
        PREPARE_WORK((_work), (_func));                         \
    } while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)                     \
    do {                                                        \
        __init_work((_work), _onstack);                         \
        (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
        INIT_LIST_HEAD(&(_work)->entry);                        \
        PREPARE_WORK((_work), (_func));                         \
    } while (0)
#endif

#define INIT_WORK(_work, _func)                                 \
    do {                                                        \
        __INIT_WORK((_work), (_func), 0);                       \
    } while (0)

#define INIT_WORK_ONSTACK(_work, _func)                         \
    do {                                                        \
        __INIT_WORK((_work), (_func), 1);                       \
    } while (0)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)              \
    do {                                                        \
        INIT_WORK(&(_work)->work, (_func));                     \
        __setup_timer(&(_work)->timer, delayed_work_timer_fn,   \
                  (unsigned long)(_work),                       \
                  (_tflags) | TIMER_IRQSAFE);                   \
    } while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)      \
    do {                                                        \
        INIT_WORK_ONSTACK(&(_work)->work, (_func));             \
        __setup_timer_on_stack(&(_work)->timer,                 \
                       delayed_work_timer_fn,                   \
                       (unsigned long)(_work),                  \
                       (_tflags) | TIMER_IRQSAFE);              \
    } while (0)

#define INIT_DELAYED_WORK(_work, _func)                         \
    __INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)                 \
    __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)                      \
    __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)              \
    __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
    test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @work: The work item in question
 */
#define delayed_work_pending(w) \
    work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
    clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
    WQ_NON_REENTRANT    = 1 << 0, /* guarantee non-reentrance */
    WQ_UNBOUND          = 1 << 1, /* not bound to any cpu */
    WQ_FREEZABLE        = 1 << 2, /* freeze during suspend */
    WQ_MEM_RECLAIM      = 1 << 3, /* may be used for memory reclaim */
    WQ_HIGHPRI          = 1 << 4, /* high priority */
    WQ_CPU_INTENSIVE    = 1 << 5, /* cpu instensive workqueue */
    WQ_SYSFS            = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

    __WQ_DRAINING       = 1 << 16, /* internal: workqueue is draining */
    __WQ_ORDERED        = 1 << 17, /* internal: workqueue is ordered */

    WQ_MAX_ACTIVE       = 512,    /* I like 512, better ideas? */
    WQ_MAX_UNBOUND_PER_CPU  = 4,  /* 4 * #cpus for unbound wq */
    WQ_DFL_ACTIVE       = WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE   \
    max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;

static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
{
    return system_wq;
}

static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
{
    return system_freezable_wq;
}

/* equivlalent to system_wq and system_freezable_wq, deprecated */
#define system_nrt_wq           __system_nrt_wq()
#define system_nrt_freezable_wq __system_nrt_freezable_wq()

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
    struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that single lock_class_key
 * doesn't end up with different namesm, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)        \
({                                                              \
    static struct lock_class_key __key;                         \
    const char *__lock_name;                                    \
                                                                \
    if (__builtin_constant_p(fmt))                              \
        __lock_name = (fmt);                                    \
    else                                                        \
        __lock_name = #fmt;                                     \
                                                                \
    __alloc_workqueue_key((fmt), (flags), (max_active),         \
                  &__key, __lock_name, ##args);                 \
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)        \
    __alloc_workqueue_key((fmt), (flags), (max_active),         \
                  NULL, NULL, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)            \
    alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name)                                  \
    alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)                        \
    alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)                     \
    alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
              const struct workqueue_attrs *attrs);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
            struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
            struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
            struct delayed_work *dwork, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
                     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
                  struct work_struct *work)
{
    return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
                      struct delayed_work *dwork,
                      unsigned long delay)
{
    return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
                    struct delayed_work *dwork,
                    unsigned long delay)
{
    return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
    return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
    return queue_work(system_wq, work);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
                        unsigned long delay)
{
    return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
                     unsigned long delay)
{
    return queue_delayed_work(system_wq, dwork, delay);
}

/**
 * keventd_up - is workqueue initialized yet?
 */
static inline bool keventd_up(void)
{
    return system_wq != NULL;
}

/*
 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
{
    bool ret;

    ret = del_timer(&work->timer);
    if (ret)
        work_clear_pending(&work->work);
    return ret;
}

/* used to be different but now identical to flush_work(), deprecated */
static inline bool __deprecated flush_work_sync(struct work_struct *work)
{
    return flush_work(work);
}

/* used to be different but now identical to flush_delayed_work(), deprecated */
static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
{
    return flush_delayed_work(dwork);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
    return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else   /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif  /* CONFIG_SYSFS */

#endif