4.4 WorkQueue


Preface

 When I used to write MCU firmware I first ran into the idea of buffering while writing drivers for communication interfaces, and the back-end code simply read and executed things in sequence; in the end there was no complete, describable framework for organizing this kind of processing. Having now come across WorkQueue and skimmed some material on it, my intuition is that it is exactly such a foreground/background processing framework. Below is a detailed analysis based on nuttx.

Interrupts

 Interrupt service routines generally run with further interrupt requests disabled, to avoid the complications of nested interrupts. But an interrupt is a random event that can arrive at any moment, and if interrupts stay disabled for too long the CPU cannot respond to other interrupt requests in time, so interrupts get lost. The OS kernel's goal is therefore to finish the interrupt request as quickly as possible (e.g. receive the data from the communication interface, set an interrupt flag, ...), then re-enable interrupts, restore the context, and push as much of the data processing as possible to later. The handling thus splits cleanly into two parts: the top half and the bottom half. The top half is the interrupt service routine itself; the kernel must execute it immediately to handle the operations triggered by the interrupt, and it runs with interrupts disabled. The bottom half is kernel code (my understanding: a worker is invoked to process the information collected by the interrupt) whose execution is deferred, and it runs with interrupts enabled.
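To make the split concrete, here is a minimal sketch of a driver that defers its bottom half to the high-priority work queue using the work_queue() call discussed later in this article. The device name, the my_uart_isr/rx_worker functions and the exact ISR prototype are illustrative assumptions, not code taken from nuttx:

#include <nuttx/wqueue.h>

static struct work_s g_rx_work;          /* Work item reused for each interrupt (hypothetical) */

/* Bottom half: runs later in a worker thread, with interrupts enabled.
 * It may take its time and may even sleep.
 */

static void rx_worker(FAR void *arg)
{
  /* Parse the buffered data, wake up readers, update statistics, ... */
}

/* Top half: the ISR itself, runs with interrupts disabled and must be short */

static int my_uart_isr(int irq, FAR void *context, FAR void *arg)
{
  /* 1. Drain the hardware FIFO into a buffer and clear the interrupt flag. */
  /* 2. Defer the heavy processing to the high-priority work queue;
   *    a delay of 0 means "run as soon as the worker thread gets the CPU".
   */

  (void)work_queue(HPWORK, &g_rx_work, rx_worker, NULL, 0);
  return OK;
}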

WorkQueue

 A work queue is another way of deferring work: the deferred work is handed to a kernel thread for execution, which means the bottom half runs in process context. Code executed through a work queue therefore enjoys all the advantages of process context; most importantly, it is allowed to be rescheduled and even to sleep. As described above, a deferred task is called work and is described by the data structure work_s; these work items are organized as a queue, the work queue, described by wqueue_s; and the worker thread is responsible for executing the work on the queue. nuttx defines two such work queues: struct wqueue_s g_work[NWORKERS];

The path is nuttx/include/nuttx/wqueue.h.
Understanding the wqueue implementation in nuttx requires three data structures: wqueue_s, dq_queue_s and work_s.

/* This structure defines the state on one work queue.  This structure is
 * used internally by the OS and worker queue logic and should not be
 * accessed by application logic.
 */

struct wqueue_s
{
  pid_t pid;            /* The task ID of the worker thread */
  struct dq_queue_s q;  /* The queue of pending work */
};
 The structure worth pointing out here is dq_queue_s, which links the whole WorkQueue together as a doubly linked list; the protagonist is the work_s described next. These work structures are chained onto the list. When a worker thread is woken up, it executes all of the work on its list; whenever a piece of work has been executed, the corresponding work_s object is removed from the list, and once no objects are left on the list the thread goes back to sleep.
/* Defines one entry in the work queue.  The user only needs this structure
 * in order to declare instances of the work structure.  Handling of all
 * fields is performed by the work APIs.
 */

struct work_s
{
  struct dq_entry_s dq; /* Implements a doubly linked list */
  worker_t worker;      /* Work callback function */
  FAR void *arg;        /* Argument passed to the work callback */
  uint32_t qtime;       /* Time at which the work was added to the queue */
  uint32_t delay;       /* Delay before the work is executed */
};

 Next, look at how the work queue actually runs; the startup sequence can be seen in the os_bringup() function:
/* Start the worker thread that will serve as the device driver "bottom-
 * half" and will perform misc garbage clean-up.
 */

#ifdef CONFIG_SCHED_WORKQUEUE
#ifdef CONFIG_SCHED_HPWORK
#ifdef CONFIG_SCHED_LPWORK
  svdbg("Starting high-priority kernel worker thread\n");
#else
  svdbg("Starting kernel worker thread\n");
#endif

  g_work[HPWORK].pid = KERNEL_THREAD(HPWORKNAME,
                                     CONFIG_SCHED_WORKPRIORITY,
                                     CONFIG_SCHED_WORKSTACKSIZE,
                                     (main_t)work_hpthread,
                                     (FAR char * const *)NULL);
  DEBUGASSERT(g_work[HPWORK].pid > 0);

  /* Start a lower priority worker thread for other, non-critical continuation
   * tasks
   */

#ifdef CONFIG_SCHED_LPWORK
  svdbg("Starting low-priority kernel worker thread\n");

  g_work[LPWORK].pid = KERNEL_THREAD(LPWORKNAME,
                                     CONFIG_SCHED_LPWORKPRIORITY,
                                     CONFIG_SCHED_LPWORKSTACKSIZE,
                                     (main_t)work_lpthread,
                                     (FAR char * const *)NULL);
  DEBUGASSERT(g_work[LPWORK].pid > 0);

#endif /* CONFIG_SCHED_LPWORK */
#endif /* CONFIG_SCHED_HPWORK */

#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_SCHED_USRWORK)
  /* Start the user-space work queue */

  DEBUGASSERT(USERSPACE->work_usrstart != NULL);
  taskid = USERSPACE->work_usrstart();
  DEBUGASSERT(taskid > 0);
#endif

#endif /* CONFIG_SCHED_WORKQUEUE */
 work_lpthread and work_hpthread are each just a for (;;) loop whose body essentially calls work_process(). The detailed analysis below dissects work_process() and how it is implemented:
/****************************************************************************
 * Name: work_process
 *
 * Description:
 *   This is the logic that performs actions placed on any work list.
 *
 * Input parameters:
 *   wqueue - Describes the work queue to be processed
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

static void work_process(struct wqueue_s *wqueue, int lock_id)
{
  volatile struct work_s *work;   /* The work entry being examined */
  worker_t worker;                /* The work callback function */
  void *arg;                      /* Argument passed to the callback */
  uint64_t elapsed;
  uint32_t remaining;
  uint32_t next;

  /* Then process queued work.  We need to keep interrupts disabled while
   * we process items in the work list.
   */

  next = CONFIG_SCHED_WORKPERIOD;
  work_lock(lock_id);                       /* This is essentially just a semaphore */

  work = (struct work_s *)wqueue->q.head;   /* Take the first work item from the doubly linked list */
  while (work)
    {
      /* Is this work ready?  It is ready if there is no delay or if
       * the delay has elapsed.  qtime is the time that the work was added
       * to the work queue.  It will always be greater than or equal to
       * zero.  Therefore a delay of zero will always execute immediately.
       */

      elapsed = USEC2TICK(clock_systimer() - work->qtime);   /* Time elapsed since the work was added to the list */
      //printf("work_process: in ticks elapsed=%lu delay=%u\n", elapsed, work->delay);
      if (elapsed >= work->delay)
        {
          /* The delay has expired: remove the ready-to-execute work from the list */

          (void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

          /* Extract the work description from the entry (in case the work
           * instance is re-used after it has been de-queued).
           */

          worker = work->worker;
          arg    = work->arg;

          /* Mark the work as no longer being queued: clear its callback */

          work->worker = NULL;

          /* Do the work.  Re-enable interrupts while the work is being
           * performed... we don't have any idea how long that will take!
           */

          work_unlock(lock_id);             /* Release the semaphore */
          if (!worker)
            {
              PX4_WARN("MESSED UP: worker = 0\n");
            }
          else
            {
              worker(arg);                  /* Execute the work */
            }

          /* Now, unfortunately, since we re-enabled interrupts we don't
           * know the state of the work list and we will have to start
           * back at the head of the list.
           */

          work_lock(lock_id);
          work = (struct work_s *)wqueue->q.head;
        }
      else
        {
          /* This one is not ready.. will it be ready before the next
           * scheduled wakeup interval?
           */

          /* Here: elapsed < work->delay.  Compute the time remaining and
           * use it to shorten the work queue's sleep interval if needed.
           */

          remaining = USEC_PER_TICK * (work->delay - elapsed);
          if (remaining < next)
            {
              /* Yes.. Then schedule to wake up when the work is ready */

              next = remaining;
            }

          /* Then try the next in the list. */

          work = (struct work_s *)work->dq.flink;   /* Advance to the next work item */
        }
    }

  /* Wait awhile to check the work list.  We will wait here until either
   * the time elapses or until we are awakened by a signal.
   */

  work_unlock(lock_id);
  usleep(next);
}
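For completeness, the for (;;) loop mentioned above amounts to the following sketch. It is written to match the two-argument work_process() shown here; the actual work_hpthread/work_lpthread bodies differ slightly between NuttX/PX4 versions:

static int work_hpthread(int argc, char *argv[])
{
  /* Loop forever.  Each pass lets work_process() drain whatever work is
   * ready on the high-priority queue and then sleep until the next work
   * item is due (or until a signal arrives).
   */

  for (; ; )
    {
      work_process(&g_work[HPWORK], HPWORK);
    }

  return OK; /* Never reached */
}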

Overall, the workqueue logic in nuttx is fairly simple: its foundation is just a doubly linked list plus a KERNEL_THREAD. The main loop keeps checking, for every work item, whether the time elapsed since the item was added to the list exceeds the delay the item itself specifies (a common re-queuing pattern built on this delay mechanism is sketched after the list):
1. If the item's delay has elapsed, the work is extracted right away, removed from the list and executed; note that afterwards the scan must restart from the head of the list.
2. If the delay has not yet elapsed, the remaining time is computed and saved, and the next work item on the list is examined.
3. If there is no runnable work left on the list, the thread usleep()s for a while.
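Because the delay is re-checked on every pass, periodic jobs are usually implemented by having the worker re-queue itself. A small sketch, assuming a hypothetical sensor_poll() worker on the low-priority queue; MSEC2TICK() from nuttx/clock.h is used here on the assumption that it converts milliseconds into the clock ticks the delay field expects (check your NuttX version for the exact macro):

#include <nuttx/clock.h>
#include <nuttx/wqueue.h>

#define POLL_DELAY MSEC2TICK(50)         /* 50 ms expressed as clock ticks (assumed macro) */

static struct work_s g_poll_work;        /* Hypothetical polling work item */

static void sensor_poll(FAR void *arg)   /* Hypothetical worker */
{
  /* ... read the sensor, publish the data ... */

  /* Re-queue ourselves so that work_process() runs us again after POLL_DELAY */

  (void)work_queue(LPWORK, &g_poll_work, sensor_poll, NULL, POLL_DELAY);
}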

Next, the nuttx work_queue interfaces (a usage sketch of the queueing calls follows the two ID lists below).
Before going through the interfaces, note that nuttx has two classes of Work Queue IDs:
Kernel-Mode Work Queue IDs:

  • HPWORK. May only be used for the high-priority, time-critical bottom halves of drivers (my understanding: things like IIC or SPI sensor data transfers).
  • LPWORK. Low-priority code that may be used for any purpose; if the low-priority queue is not enabled, so that the kernel has only one work queue, it is equivalent to HPWORK.

User-Mode Work Queue IDs:

  • USRWORK. In short, a work queue that any app may use.
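The calls used to put work on (or remove it from) these queues are declared in nuttx/include/nuttx/wqueue.h. A hedged usage sketch follows; the prototypes match common NuttX versions but the exact parameter types vary between releases, and the bmp_* names are made up:

/* Prototypes (see nuttx/include/nuttx/wqueue.h; delay is in clock ticks) */

int work_queue(int qid, FAR struct work_s *work, worker_t worker,
               FAR void *arg, uint32_t delay);
int work_cancel(int qid, FAR struct work_s *work);

/* Hypothetical usage: run bmp_worker() on the high-priority queue after
 * 10 ticks, or cancel it if it has not started yet.
 */

static void bmp_worker(FAR void *arg);
static struct work_s g_bmp_work;

void bmp_start(void)
{
  (void)work_queue(HPWORK, &g_bmp_work, bmp_worker, NULL, 10);
}

void bmp_stop(void)
{
  (void)work_cancel(HPWORK, &g_bmp_work);
}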

single list queue
Singly linked list queue:
1. The singly linked queue structure records the address of the first node (head) and the address of the last node (tail)
2. When the queue is empty, the head and tail pointers are NULL
3. For the last node, the forward pointer flink is NULL

/* Node structure */

struct sq_entry_s
{
  struct sq_entry_s *flink; /* forward */
};
typedef struct sq_entry_s sq_entry_t;

/* Singly linked queue structure */

struct sq_queue_s
{
  sq_entry_t *head; /* queue head (first) node */
  sq_entry_t *tail; /* queue tail (last) node */
};
typedef struct sq_queue_s sq_queue_t;

void sq_addfirst(sq_entry_t *node, sq_queue_t *queue);                    /* Add a node at the head of the list */
void sq_addlast(sq_entry_t *node, sq_queue_t *queue);                     /* Add a node at the tail of the list */
void sq_addafter(sq_entry_t *prev, sq_entry_t *node, sq_queue_t *queue);  /* Insert a node after prev */
sq_entry_t *sq_remfirst(sq_queue_t *queue);
sq_entry_t *sq_remlast(sq_queue_t *queue);
sq_entry_t *sq_remafter(sq_entry_t *node, sq_queue_t *queue);
void sq_rem(sq_entry_t *node, sq_queue_t *queue);

--> flink

    +------------------------------------------------- single list queue head
    |
    v
+--------+--------+~~~~~~~~~+--------+--------+------+
|  first |        |         |        |  last  |      |
|  node  |  node  | ~~~~~~~ |  node  |  node  | NULL |
|    +-->|    +-->|    +--->|    +-->|    +-->|      |
+--------+--------+~~~~~~~~~+--------+--------+------+
                                         ^
                                         |
                                         +----------- single list queue tail
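To make the head/tail bookkeeping above concrete, here is a rough sketch of what sq_addlast() has to do, written from the three rules listed above rather than copied from the nuttx sources:

void sq_addlast(sq_entry_t *node, sq_queue_t *queue)
{
  node->flink = NULL;             /* The new node becomes the last node */

  if (!queue->head)
    {
      /* Empty queue: the node is both the first and the last node */

      queue->head = node;
      queue->tail = node;
    }
  else
    {
      /* Link the node after the current tail and move the tail pointer */

      queue->tail->flink = node;
      queue->tail        = node;
    }
}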

double list queue
Doubly linked list queue:
1. The doubly linked queue structure records the address of the first node (head) and the address of the last node (tail)
2. When the queue is empty, the head and tail pointers are NULL
3. For the first node, the backward pointer blink is NULL
4. For the last node, the forward pointer flink is NULL

struct dq_entry_s
{
  FAR struct dq_entry_s *flink;
  FAR struct dq_entry_s *blink;
};
typedef struct dq_entry_s dq_entry_t;

struct dq_queue_s
{
  dq_entry_t *head; /* queue head (first) node */
  dq_entry_t *tail; /* queue tail (last) node */
};
typedef struct dq_queue_s dq_queue_t;

void dq_addfirst(dq_entry_t *node, dq_queue_t *queue);
void dq_addlast(dq_entry_t *node, dq_queue_t *queue);
void dq_addafter(dq_entry_t *prev, dq_entry_t *node, dq_queue_t *queue);
void dq_addbefore(dq_entry_t *next, dq_entry_t *node, dq_queue_t *queue);
dq_entry_t *dq_remfirst(dq_queue_t *queue);
dq_entry_t *dq_remlast(dq_queue_t *queue);
void dq_rem(dq_entry_t *node, dq_queue_t *queue);

--> flink  <-- blink

           +------------------------------------------ double list queue head
           |
           v
+------+--------+--------+~~~~~~~~~+--------+--------+------+
|      |  first |        |         |        |  last  |      |
| NULL |  node  |  node  | ~~~~~~~ |  node  |  node  | NULL |
|      |<--+    |<--+    |<--+     |<--+    |<--+    |      |
|      |    +-->|    +-->|    +--->|    +-->|    +-->|      |
+------+--------+--------+~~~~~~~~~+--------+--------+------+
                                                ^
                                                |
                                                +------ double list queue tail
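Removing a node from the doubly linked queue is then just pointer surgery on flink/blink, plus fixing head or tail when the removed node sits at an end of the list. A sketch derived from the layout above (not a verbatim copy of the nuttx implementation):

void dq_rem(dq_entry_t *node, dq_queue_t *queue)
{
  dq_entry_t *prev = node->blink;
  dq_entry_t *next = node->flink;

  if (!prev)
    {
      queue->head = next;   /* Removed the first node: move the head */
    }
  else
    {
      prev->flink = next;
    }

  if (!next)
    {
      queue->tail = prev;   /* Removed the last node: move the tail */
    }
  else
    {
      next->blink = prev;
    }

  node->flink = NULL;       /* Fully detach the removed node */
  node->blink = NULL;
}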