Linux Process Scheduling

When Processes Are Scheduled and How They Are Switched

Operating-system textbooks describe a large number of process scheduling algorithms. From an implementation point of view, every one of them simply picks a new process off the run queue; they differ only in the policy used to make that choice.
For understanding how the operating system actually works, the timing of scheduling and the mechanism of process switching are far more important.

Scheduling points
During interrupt handling (timer interrupts, I/O interrupts, system calls, and exceptions), the kernel either calls schedule() directly, or calls it on the return path to user mode when the need_resched flag is set;
Kernel threads can call schedule() directly to switch processes, and they can also be scheduled during interrupt handling; in other words, kernel threads are a special kind of process that can be scheduled both actively and passively (see the sketch after this list);
User-mode processes cannot trigger scheduling actively; they can only be scheduled at certain points after trapping into kernel mode, i.e. during interrupt handling.
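
To make the active/passive distinction concrete, here is a minimal sketch of a kernel thread written as a loadable module (the thread name, function names, and module metadata are invented for illustration, not taken from any real driver). It yields the CPU actively via cond_resched() and schedule_timeout(), and it can still be preempted passively when, say, the timer interrupt sets need_resched:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *worker;

/*
 * A kernel thread schedules actively: it sets its own state and calls
 * schedule()/schedule_timeout() or cond_resched() whenever it wants to
 * yield.  It is scheduled passively when an interrupt marks it for
 * preemption via need_resched.
 */
static int worker_fn(void *data)
{
        while (!kthread_should_stop()) {
                /* ... do a small piece of work ... */
                cond_resched();                  /* cheap voluntary yield point */

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);            /* active scheduling: sleep ~1s */
        }
        return 0;
}

static int __init sched_demo_init(void)
{
        worker = kthread_run(worker_fn, NULL, "sched_demo");
        return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void __exit sched_demo_exit(void)
{
        kthread_stop(worker);
}

module_init(sched_demo_init);
module_exit(sched_demo_exit);
MODULE_LICENSE("GPL");

This only illustrates the two yield styles; built against kernel headers it would be loaded with insmod, but it is not meant as a production worker.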

Process switching
To control process execution, the kernel must be able to suspend the process currently running on the CPU and resume some previously suspended process. This is called a process switch, task switch, or context switch;
Suspending the process running on the CPU is not the same as saving state at an interrupt: before and after an interrupt the CPU stays in the same process context and merely moves from user mode to kernel mode;
The process context contains all the information the process needs in order to run:
User address space: program code, data, the user stack, and so on
Control information: the process descriptor, the kernel stack, and so on
Hardware context (note that an interrupt also saves a hardware context, just by a different method; see the sketch below)
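
As a rough mental model only (this is not the real kernel layout), the hardware context can be pictured as a couple of per-task fields. On x86-32 the kernel keeps the saved kernel stack pointer and the resume address in thread.sp and thread.ip, which is exactly what the switch_to macro shown later loads and stores:

/* Conceptual sketch only -- not the actual struct thread_struct. */
struct hw_context_sketch {
        unsigned long sp;   /* saved kernel stack pointer (cf. thread.sp)     */
        unsigned long ip;   /* address to resume execution at (cf. thread.ip) */
        /* callee-saved registers, FPU state, etc. live on the kernel
         * stack or in other per-task fields                                  */
};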
schedule() selects a new process to run and calls context_switch() to switch contexts; context_switch() in turn uses the switch_to macro to perform the low-level context switch:
next = pick_next_task(rq, prev);        /* the scheduling algorithms are all encapsulated behind this call */
context_switch(rq, prev, next);         /* process context switch */

switch_to uses the parameters prev and next: prev points to the current process and next to the process being scheduled in. (As the code below shows, the macro also has a third argument, last, which, once this task is eventually switched back in, identifies the task that ran immediately before it.)

The source of __schedule(), the core of schedule(), is as follows:

/*
 * __schedule() is the main scheduler function.
 *
 * The main means of driving the scheduler and thus entering this function are:
 *
 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
 *
 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
 *      paths. For example, see arch/x86/entry_64.S.
 *
 *      To drive preemption between tasks, the scheduler sets the flag in timer
 *      interrupt handler scheduler_tick().
 *
 *   3. Wakeups don't really cause entry into schedule(). They add a
 *      task to the run-queue and that's it.
 *
 *      Now, if the new task added to the run-queue preempts the current
 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
 *      called on the nearest possible occasion:
 *
 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
 *
 *         - in syscall or exception context, at the next outmost
 *           preempt_enable(). (this might be as soon as the wake_up()'s
 *           spin_unlock()!)
 *
 *         - in IRQ context, return from interrupt-handler to
 *           preemptible context
 *
 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
 *         then at the next:
 *
 *          - cond_resched() call
 *          - explicit schedule() call
 *          - return from syscall or exception to user-space
 *          - return from interrupt-handler to user-space
 */
static void __sched __schedule(void)
{
        struct task_struct *prev, *next;
        unsigned long *switch_count;
        struct rq *rq;
        int cpu;

need_resched:
        preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        rcu_note_context_switch(cpu);
        prev = rq->curr;

        schedule_debug(prev);

        if (sched_feat(HRTICK))
                hrtick_clear(rq);

        /*
         * Make sure that signal_pending_state()->signal_pending() below
         * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
         * done by the caller to avoid the race with signal_wake_up().
         */
        smp_mb__before_spinlock();
        raw_spin_lock_irq(&rq->lock);

        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
                        prev->state = TASK_RUNNING;
                } else {
                        deactivate_task(rq, prev, DEQUEUE_SLEEP);
                        prev->on_rq = 0;

                        /*
                         * If a worker went to sleep, notify and ask workqueue
                         * whether it wants to wake up a task to maintain
                         * concurrency.
                         */
                        if (prev->flags & PF_WQ_WORKER) {
                                struct task_struct *to_wakeup;

                                to_wakeup = wq_worker_sleeping(prev, cpu);
                                if (to_wakeup)
                                        try_to_wake_up_local(to_wakeup);
                        }
                }
                switch_count = &prev->nvcsw;
        }

        if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
                update_rq_clock(rq);

        next = pick_next_task(rq, prev);        /* pick the next task to run; the scheduling policies live behind this call */
        clear_tsk_need_resched(prev);
        clear_preempt_need_resched();
        rq->skip_clock_update = 0;

        if (likely(prev != next)) {
                rq->nr_switches++;
                rq->curr = next;
                ++*switch_count;

                context_switch(rq, prev, next); /* unlocks the rq; performs the process context switch */
                /*
                 * The context switch have flipped the stack from under us
                 * and restored the local variables which were saved when
                 * this task called schedule() in the past. prev == current
                 * is still correct, but it can be moved to another cpu/rq.
                 */
                cpu = smp_processor_id();
                rq = cpu_rq(cpu);
        } else
                raw_spin_unlock_irq(&rq->lock);

        post_schedule(rq);

        sched_preempt_enable_no_resched();
        if (need_resched())
                goto need_resched;
}
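
Note how switch_count points either to prev->nivcsw or to prev->nvcsw: the former counts involuntary switches (the task was still runnable and got preempted), the latter voluntary ones (the task blocked). Both counters are visible from user space, so a small program (a sketch, with error handling omitted) can watch its own counters through getrusage():

#include <stdio.h>
#include <unistd.h>
#include <sched.h>
#include <sys/resource.h>

int main(void)
{
        struct rusage ru;

        /* Both sched_yield() and a short sleep go through __schedule(). */
        for (int i = 0; i < 5; i++)
                sched_yield();
        usleep(100 * 1000);

        getrusage(RUSAGE_SELF, &ru);
        /* ru_nvcsw / ru_nivcsw mirror the per-task nvcsw / nivcsw counters
         * that __schedule() increments through switch_count. */
        printf("voluntary ctx switches:   %ld\n", ru.ru_nvcsw);
        printf("involuntary ctx switches: %ld\n", ru.ru_nivcsw);
        return 0;
}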
The code of context_switch() is as follows:

/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
{
        struct mm_struct *mm, *oldmm;

        prepare_task_switch(rq, prev, next);

        mm = next->mm;                  /* mm points to next's address space */
        oldmm = prev->active_mm;
        /*
         * For paravirt, this is coupled with an exit in switch_to to
         * combine the page table reload and the switch backend into
         * one hypercall.
         */
        arch_start_context_switch(prev);

        if (!mm) {
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);     /* switch the process address space */

        if (!prev->mm) {
                prev->active_mm = NULL;
                rq->prev_mm = oldmm;
        }
        /*
         * Since the runqueue lock will be released by the next
         * task (which is an invalid locking op but in the case
         * of the scheduler it's an obvious special-case), so we
         * do an early lockdep release here:
         */
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);

        context_tracking_task_switch(prev, next);
        /* Here we just switch the register state and the stack. */
        switch_to(prev, next, prev);    /* the switch macro: swaps registers and the kernel stack */

        barrier();
        /*
         * this_rq must be evaluated again because prev may have moved
         * CPUs since it called schedule(), thus the 'rq' on its stack
         * frame will be invalid.
         */
        finish_task_switch(this_rq(), prev);
}
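
The mm/active_mm handling above is the subtle part: a kernel thread has no address space of its own (next->mm == NULL), so instead of calling switch_mm() it simply borrows prev's active_mm and enters lazy-TLB mode, avoiding a page-table switch. Purely as an illustration, the decision can be restated in plain user-space C (none of these types or names exist in the kernel):

#include <stdio.h>

/* Purely illustrative types; not kernel structures. */
struct mm_sketch   { const char *name; int refcount; };
struct task_sketch { const char *comm; struct mm_sketch *mm, *active_mm; };

/*
 * Mirrors the decision context_switch() makes: a kernel thread
 * (mm == NULL) borrows the previous task's active_mm and skips the
 * page-table switch; a normal process installs its own mm.
 */
static void switch_mm_sketch(struct task_sketch *prev, struct task_sketch *next)
{
        struct mm_sketch *oldmm = prev->active_mm;

        if (!next->mm) {                    /* kernel thread: lazy TLB   */
                next->active_mm = oldmm;
                oldmm->refcount++;          /* cf. atomic_inc(&oldmm->mm_count) */
                printf("%s borrows %s, no page-table switch\n",
                       next->comm, oldmm->name);
        } else {                            /* user process: real switch */
                next->active_mm = next->mm;
                printf("%s switches to %s\n", next->comm, next->mm->name);
        }
}

int main(void)
{
        struct mm_sketch   bash_mm = { "bash's mm", 1 };
        struct task_sketch bash    = { "bash",      &bash_mm, &bash_mm };
        struct task_sketch kworker = { "kworker/0", NULL,     NULL     };

        switch_mm_sketch(&bash, &kworker);  /* kernel thread borrows bash's mm */
        return 0;
}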

The code of the switch_to macro (the x86-32 version) is shown below. It is inline assembly, and the comments in the source already explain each step clearly, so read it together with them:

#define switch_to(prev, next, last) \
do { \
        /* \
         * Context-switching clobbers all registers, so we clobber \
         * them explicitly, via unused output variables. \
         * (EAX and EBP is not listed because EBP is saved/restored \
         * explicitly for wchan access and EAX is the return value of \
         * __switch_to()) \
         */ \
        unsigned long ebx, ecx, edx, esi, edi; \
 \
        asm volatile("pushfl\n\t"                    /* save    flags */ \
                     "pushl %%ebp\n\t"               /* save    EBP   */ \
                     "movl %%esp,%[prev_sp]\n\t"     /* save    ESP   */ \
                     "movl %[next_sp],%%esp\n\t"     /* restore ESP   */ \
                     "movl $1f,%[prev_ip]\n\t"       /* save    EIP   */ \
                     "pushl %[next_ip]\n\t"          /* restore EIP   */ \
                     __switch_canary \
                     "jmp __switch_to\n"             /* regparm call  */ \
                     "1:\t" \
                     "popl %%ebp\n\t"                /* restore EBP   */ \
                     "popfl\n"                       /* restore flags */ \
 \
                     /* output parameters */ \
                     : [prev_sp] "=m" (prev->thread.sp), \
                       [prev_ip] "=m" (prev->thread.ip), \
                       "=a" (last), \
 \
                       /* clobbered output registers: */ \
                       "=b" (ebx), "=c" (ecx), "=d" (edx), \
                       "=S" (esi), "=D" (edi) \
 \
                       __switch_canary_oparam \
 \
                       /* input parameters: */ \
                     : [next_sp]  "m" (next->thread.sp), \
                       [next_ip]  "m" (next->thread.ip), \
 \
                       /* regparm parameters for __switch_to(): */ \
                       [prev]     "a" (prev), \
                       [next]     "d" (next) \
 \
                       __switch_canary_iparam \
 \
                     : /* reloaded segment registers */ \
                       "memory"); \
} while (0)
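
The heart of switch_to is the pair of movl instructions that swap the stack pointer, plus the saved EIP that makes execution resume at label 1: on the other task's kernel stack. A rough user-space analogue of this "save my SP/IP, load yours" idea can be written with the POSIX ucontext API (a sketch only; the real switch_to additionally jumps through __switch_to and operates on kernel stacks and thread_struct fields):

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t ctx_main, ctx_task;

static void task_fn(void)
{
        printf("in task: running on its own stack\n");
        /* Like switching back to prev: save our context, resume main's. */
        swapcontext(&ctx_task, &ctx_main);
        printf("in task: resumed exactly where it left off\n");
}

int main(void)
{
        char *stack = malloc(64 * 1024);

        getcontext(&ctx_task);
        ctx_task.uc_stack.ss_sp   = stack;
        ctx_task.uc_stack.ss_size = 64 * 1024;
        ctx_task.uc_link          = &ctx_main;   /* resume main when task_fn returns */
        makecontext(&ctx_task, task_fn, 0);

        /* Analogue of switch_to(prev, next): save main's SP/IP, load the task's. */
        swapcontext(&ctx_main, &ctx_task);
        printf("back in main\n");
        swapcontext(&ctx_main, &ctx_task);       /* switch to the task once more */
        printf("main: done\n");

        free(stack);
        return 0;
}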



