Linux Kernel Workqueues: Executing Work Items
The previous articles covered workqueues, the work items placed on them, and the kernel threads that back a workqueue. This article looks at how each of those work items is actually executed on a worker thread, i.e. at the function worker_thread().
A queued work item runs through the following call chain: kthread() -> worker_thread() -> process_one_work().
The outermost function is kthread() because kthread_create_on_node() installs kthread() as the thread's entry point when the worker task is created.
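Before walking through worker_thread(), it helps to see where work items come from. The sketch below is not from the original article; the names my_driver_data, my_handler and my_driver_* are invented for illustration. It shows the usual driver-side pattern: embed a work_struct, initialize it with INIT_WORK(), and queue it. The queued item is what worker_thread() later pulls off pool->worklist and hands to process_one_work().

#include <linux/workqueue.h>

struct my_driver_data {
        struct work_struct work;        /* embedded work item */
        int pending_events;
};

/* the handler that worker->current_func(work) ends up calling */
static void my_handler(struct work_struct *work)
{
        struct my_driver_data *data =
                container_of(work, struct my_driver_data, work);

        /* runs in process context on a kworker thread; it may sleep */
        data->pending_events = 0;
}

static void my_driver_init(struct my_driver_data *data)
{
        INIT_WORK(&data->work, my_handler);
}

static void my_driver_event(struct my_driver_data *data)
{
        /* put the item on the system workqueue; a kworker picks it up */
        schedule_work(&data->work);
}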
/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The worker thread function.  All workers belong to a worker_pool -
 * either a per-cpu one or dynamic unbound one.  These workers process all
 * work items regardless of their specific target workqueue.  The only
 * exception is work items which belong to workqueues with a rescuer which
 * will be explained in rescuer_thread().
 */
static int worker_thread(void *__worker)
{
        struct worker *worker = __worker;
        struct worker_pool *pool = worker->pool;

        /* tell the scheduler that this is a workqueue worker */
        worker->task->flags |= PF_WQ_WORKER;
woke_up:
        spin_lock_irq(&pool->lock);

        /* am I supposed to die? */
        if (unlikely(worker->flags & WORKER_DIE)) {
                spin_unlock_irq(&pool->lock);
                WARN_ON_ONCE(!list_empty(&worker->entry));
                worker->task->flags &= ~PF_WQ_WORKER;
                return 0;
        }

        worker_leave_idle(worker);
recheck:
        /* no more worker necessary? */
        if (!need_more_worker(pool))
                goto sleep;

        /* do we need to manage? */
        if (unlikely(!may_start_working(pool)) && manage_workers(worker))
                goto recheck;

        /*
         * ->scheduled list can only be filled while a worker is
         * preparing to process a work or actually processing it.
         * Make sure nobody diddled with it while I was sleeping.
         */
        WARN_ON_ONCE(!list_empty(&worker->scheduled));

        /*
         * Finish PREP stage.  We're guaranteed to have at least one idle
         * worker or that someone else has already assumed the manager
         * role.  This is where @worker starts participating in concurrency
         * management if applicable and concurrency management is restored
         * after being rebound.  See rebind_workers() for details.
         */
        worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);

        do {
                struct work_struct *work =
                        list_first_entry(&pool->worklist,
                                         struct work_struct, entry);

                if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
                        /* optimization path, not strictly necessary */
                        process_one_work(worker, work);
                        if (unlikely(!list_empty(&worker->scheduled)))
                                process_scheduled_works(worker);
                } else {
                        move_linked_works(work, &worker->scheduled, NULL);
                        process_scheduled_works(worker);
                }
        } while (keep_working(pool));

        worker_set_flags(worker, WORKER_PREP, false);
sleep:
        if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
                goto recheck;

        /*
         * pool->lock is held and there's no work to process and no need to
         * manage, sleep.  Workers are woken up only while holding
         * pool->lock or from local cpu, so setting the current state
         * before releasing pool->lock is enough to prevent losing any
         * event.
         */
        worker_enter_idle(worker);
        __set_current_state(TASK_INTERRUPTIBLE);
        spin_unlock_irq(&pool->lock);
        schedule();
        goto woke_up;
}
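One detail in the loop above deserves a note: a work item with WORK_STRUCT_LINKED set drags the items linked behind it (typically the barrier inserted by a flush) onto worker->scheduled so they execute back to back. From a driver's point of view this is the machinery behind flush_work() and cancel_work_sync(). A minimal usage sketch, reusing the illustrative my_driver_data from the earlier example:

static void my_driver_teardown(struct my_driver_data *data)
{
        /*
         * Wait until any queued or running instance of data->work has
         * finished; internally a barrier work is linked behind it and
         * processed via worker->scheduled.
         */
        flush_work(&data->work);

        /* alternatively: drop a pending instance and wait for a running one */
        cancel_work_sync(&data->work);
}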
/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logics necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
__acquires(&pool->lock)
{
        struct pool_workqueue *pwq = get_work_pwq(work);
        struct worker_pool *pool = worker->pool;
        bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
        int work_color;
        struct worker *collision;
#ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the struct work_struct from
         * inside the function that is called from it, this we need to
         * take into account for lockdep too.  To avoid bogus "held
         * lock freed" warnings as well as problems when looking into
         * work->lockdep_map, make a copy and use that here.
         */
        struct lockdep_map lockdep_map;

        lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
        /*
         * Ensure we're on the correct CPU.  DISASSOCIATED test is
         * necessary to avoid spurious warnings from rescuers servicing the
         * unbound or a disassociated pool.
         */
        WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
                     !(pool->flags & POOL_DISASSOCIATED) &&
                     raw_smp_processor_id() != pool->cpu);

        /*
         * A single work shouldn't be executed concurrently by
         * multiple workers on a single cpu.  Check whether anyone is
         * already processing the work.  If so, defer the work to the
         * currently executing one.
         */
        collision = find_worker_executing_work(pool, work);
        if (unlikely(collision)) {
                move_linked_works(work, &collision->scheduled, NULL);
                return;
        }

        /* claim and dequeue */
        debug_work_deactivate(work);
        hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
        worker->current_work = work;
        worker->current_func = work->func;
        worker->current_pwq = pwq;
        work_color = get_work_color(work);

        list_del_init(&work->entry);

        /*
         * CPU intensive works don't participate in concurrency
         * management.  They're the scheduler's responsibility.
         */
        if (unlikely(cpu_intensive))
                worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);

        /*
         * Unbound pool isn't concurrency managed and work items should be
         * executed ASAP.  Wake up another worker if necessary.
         */
        if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
                wake_up_worker(pool);

        /*
         * Record the last pool and clear PENDING which should be the last
         * update to @work.  Also, do this inside @pool->lock so that
         * PENDING and queued state changes happen together while IRQ is
         * disabled.
         */
        set_work_pool_and_clear_pending(work, pool->id);

        spin_unlock_irq(&pool->lock);

        lock_map_acquire_read(&pwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
        worker->current_func(work);
        /*
         * While we must be careful to not use "work" after this, the trace
         * point will only record its address.
         */
        trace_workqueue_execute_end(work);
        lock_map_release(&lockdep_map);
        lock_map_release(&pwq->wq->lockdep_map);

        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
                       "     last function: %pf\n",
                       current->comm, preempt_count(), task_pid_nr(current),
                       worker->current_func);
                debug_show_held_locks(current);
                dump_stack();
        }

        /*
         * The following prevents a kworker from hogging CPU on !PREEMPT
         * kernels, where a requeueing work item waiting for something to
         * happen could deadlock with stop_machine as such work item could
         * indefinitely requeue itself while all other CPUs are trapped in
         * stop_machine.
         */
        cond_resched();

        spin_lock_irq(&pool->lock);

        /* clear cpu intensive status */
        if (unlikely(cpu_intensive))
                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);

        /* we're done with it, release */
        hash_del(&worker->hentry);
        worker->current_work = NULL;
        worker->current_func = NULL;
        worker->current_pwq = NULL;
        worker->desc_valid = false;
        pwq_dec_nr_in_flight(pwq, work_color);
}
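process_one_work() reads cpu_intensive from the owning workqueue's WQ_CPU_INTENSIVE flag and, for such items, takes the worker out of concurrency management while the callback runs. That flag is chosen when the workqueue is allocated. A minimal sketch, with the workqueue name and the my_* helpers again invented for illustration (my_driver_data as in the earlier example):

static struct workqueue_struct *my_crunch_wq;

static int my_crunch_init(void)
{
        /*
         * Per-CPU workqueue whose items may burn CPU for long stretches;
         * WQ_CPU_INTENSIVE keeps them from stalling concurrency management
         * for other per-CPU work on the same pool.
         */
        my_crunch_wq = alloc_workqueue("my_crunch", WQ_CPU_INTENSIVE, 0);
        if (!my_crunch_wq)
                return -ENOMEM;
        return 0;
}

static void my_crunch_submit(struct my_driver_data *data)
{
        queue_work(my_crunch_wq, &data->work);
}

static void my_crunch_exit(void)
{
        destroy_workqueue(my_crunch_wq);
}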