深入理解Linux之copy_process()

来源:互联网 发布:dreamweaver做淘宝分类 编辑:程序博客网 时间:2024/04/30 06:31

copy_process()这个函数其实很简单,由do_fork()调用,参数和do_fork()的参数一样,只是多了一个子进程的PID。copy_process()中,开始时检测clone_flags中的标志位,看看是否允许copy。然后就是创建两个结构体,task_struct和thread_info,用来保存子进程的信息,然后将父进程中的这两个地方的信息复制过来,存到刚刚创建的结构体中。然后更新一下系统中关于进程数量等的信息,更改一下子进程的clone_flags信息。设置子进程运行的CPU,把子进程加入到运行队列。还是在文件kernel/fork.c中,代码如下:

static struct task_struct *dup_task_struct(struct task_struct *orig){struct task_struct *tsk;struct thread_info *ti;prepare_to_copy(orig);tsk = alloc_task_struct();  //申请空间if (!tsk)return NULL;ti = alloc_thread_info(tsk); //申请thread_info的空间if (!ti) {free_task_struct(tsk);  //如果没有申请到,要把刚刚申请的task_struct空间,一并释放return NULL;}//task_struct中有指向thread_info的指针,thread_info中也有指向task_struct的指针 *ti = *orig->thread_info;  //将父进程的thread_info的值复制到刚刚申请的thread_info空间中*tsk = *orig;              //将父进程的task_struct中的值复制到刚刚申请的子进程的task_struct中tsk->thread_info = ti;     //将子进程的task_struct中的thread_info指针指向刚刚赋值的thread_info空间ti->task = tsk;            //将刚刚申请的thread_info中的task指针指向子进程的task_struct
            //这四行代码看着乱,其实画画图就很好理解了。注意指针赋值和指针指向的空间的赋值。

/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
return tsk;
}

/*
 * This creates a new process as a copy of the old one, * but does not actually start it yet. * * It copies the registers, and all the appropriate * parts of the process environment (as per the clone * flags). The actual kick-off is left to the caller. */static task_t *copy_process(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr, int pid){int retval;struct task_struct *p = NULL;if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))return ERR_PTR(-EINVAL);/* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. */if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))return ERR_PTR(-EINVAL);/* * Shared signal handlers imply shared VM. By way of the above, * thread groups also imply shared VM. Blocking this case allows * for various simplifications in other code. */if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))return ERR_PTR(-EINVAL);retval = security_task_create(clone_flags);if (retval)goto fork_out;retval = -ENOMEM;p = dup_task_struct(current);  //创建子进程的task_struct,并为其复制。if (!p)goto fork_out;retval = -EAGAIN;if (atomic_read(&p->user->processes) >=p->signal->rlim[RLIMIT_NPROC].rlim_cur) {if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&p->user != &root_user)goto bad_fork_free;}atomic_inc(&p->user->__count);atomic_inc(&p->user->processes);get_group_info(p->group_info);/* * If multiple threads are within copy_process(), then this check * triggers too late. This doesn't hurt, the check is only there * to stop root fork bombs. 
*/if (nr_threads >= max_threads)goto bad_fork_cleanup_count;if (!try_module_get(p->thread_info->exec_domain->module))goto bad_fork_cleanup_count;if (p->binfmt && !try_module_get(p->binfmt->module))goto bad_fork_cleanup_put_domain;p->did_exec = 0;copy_flags(clone_flags, p);p->pid = pid;retval = -EFAULT;if (clone_flags & CLONE_PARENT_SETTID)if (put_user(p->pid, parent_tidptr))goto bad_fork_cleanup;p->proc_dentry = NULL;INIT_LIST_HEAD(&p->children);INIT_LIST_HEAD(&p->sibling);init_waitqueue_head(&p->wait_chldexit);p->vfork_done = NULL;spin_lock_init(&p->alloc_lock);spin_lock_init(&p->proc_lock);clear_tsk_thread_flag(p, TIF_SIGPENDING);init_sigpending(&p->pending);p->it_real_value = p->it_virt_value = p->it_prof_value = 0;p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;init_timer(&p->real_timer);p->real_timer.data = (unsigned long) p;p->utime = p->stime = 0;p->lock_depth = -1;/* -1 = no lock */do_posix_clock_monotonic_gettime(&p->start_time);p->security = NULL;p->io_context = NULL;p->io_wait = NULL;p->audit_context = NULL;#ifdef CONFIG_NUMA p->mempolicy = mpol_copy(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; goto bad_fork_cleanup; }#endifp->tgid = p->pid;if (clone_flags & CLONE_THREAD)p->tgid = current->tgid;if ((retval = security_task_alloc(p)))goto bad_fork_cleanup_policy;if ((retval = audit_alloc(p)))goto bad_fork_cleanup_security;/* copy all the process information */if ((retval = copy_semundo(clone_flags, p)))goto bad_fork_cleanup_audit;if ((retval = copy_files(clone_flags, p)))goto bad_fork_cleanup_semundo;if ((retval = copy_fs(clone_flags, p)))goto bad_fork_cleanup_files;if ((retval = copy_sighand(clone_flags, p)))goto bad_fork_cleanup_fs;if ((retval = copy_signal(clone_flags, p)))goto bad_fork_cleanup_sighand;if ((retval = copy_mm(clone_flags, p)))goto bad_fork_cleanup_signal;if ((retval = copy_keys(clone_flags, p)))goto bad_fork_cleanup_mm;if ((retval = copy_namespace(clone_flags, p)))goto 
bad_fork_cleanup_keys;retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);if (retval)goto bad_fork_cleanup_namespace;p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;/* * Clear TID on mm_release()? */p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;/* * Syscall tracing should be turned off in the child regardless * of CLONE_PTRACE. */clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);/* Our parent execution domain becomes current domain   These must match for thread signalling to apply */   p->parent_exec_id = p->self_exec_id;/* ok, now we should be set up.. */p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);p->pdeath_signal = 0;p->exit_state = 0;/* Perform scheduler related setup */sched_fork(p);/* * Ok, make it visible to the rest of the system. * We dont wake it up yet. */p->group_leader = p;INIT_LIST_HEAD(&p->ptrace_children);INIT_LIST_HEAD(&p->ptrace_list);/* Need tasklist lock for parent etc handling! */write_lock_irq(&tasklist_lock);/* * The task hasn't been attached yet, so cpus_allowed mask cannot * have changed. The cpus_allowed mask of the parent may have * changed after it was copied first time, and it may then move to * another CPU - so we re-copy it here and set the child's CPU to * the parent's CPU. This avoids alot of nasty races. */p->cpus_allowed = current->cpus_allowed;set_task_cpu(p, smp_processor_id());/* * Check for pending SIGKILL! The new thread should not be allowed * to slip out of an OOM kill. (or normal SIGKILL.) 
*/if (sigismember(¤t->pending.signal, SIGKILL)) {write_unlock_irq(&tasklist_lock);retval = -EINTR;goto bad_fork_cleanup_namespace;}/* CLONE_PARENT re-uses the old parent */if (clone_flags & (CLONE_PARENT|CLONE_THREAD))p->real_parent = current->real_parent;elsep->real_parent = current;p->parent = p->real_parent;if (clone_flags & CLONE_THREAD) {spin_lock(¤t->sighand->siglock);/* * Important: if an exit-all has been started then * do not create this new thread - the whole thread * group is supposed to exit anyway. */if (current->signal->group_exit) {spin_unlock(¤t->sighand->siglock);write_unlock_irq(&tasklist_lock);retval = -EAGAIN;goto bad_fork_cleanup_namespace;}p->group_leader = current->group_leader;if (current->signal->group_stop_count > 0) {/* * There is an all-stop in progress for the group. * We ourselves will stop as soon as we check signals. * Make the new thread part of that group stop too. */current->signal->group_stop_count++;set_tsk_thread_flag(p, TIF_SIGPENDING);}spin_unlock(¤t->sighand->siglock);}SET_LINKS(p);if (unlikely(p->ptrace & PT_PTRACED))__ptrace_link(p, current->parent);attach_pid(p, PIDTYPE_PID, p->pid);attach_pid(p, PIDTYPE_TGID, p->tgid);if (thread_group_leader(p)) {attach_pid(p, PIDTYPE_PGID, process_group(p));attach_pid(p, PIDTYPE_SID, p->signal->session);if (p->pid)__get_cpu_var(process_counts)++;}nr_threads++;write_unlock_irq(&tasklist_lock);retval = 0;fork_out:if (retval)return ERR_PTR(retval);return p;bad_fork_cleanup_namespace:exit_namespace(p);bad_fork_cleanup_keys:exit_keys(p);bad_fork_cleanup_mm:if (p->mm)mmput(p->mm);bad_fork_cleanup_signal:exit_signal(p);bad_fork_cleanup_sighand:exit_sighand(p);bad_fork_cleanup_fs:exit_fs(p); /* blocking */bad_fork_cleanup_files:exit_files(p); /* blocking */bad_fork_cleanup_semundo:exit_sem(p);bad_fork_cleanup_audit:audit_free(p);bad_fork_cleanup_security:security_task_free(p);bad_fork_cleanup_policy:#ifdef CONFIG_NUMAmpol_free(p->mempolicy);#endifbad_fork_cleanup:if 
(p->binfmt)module_put(p->binfmt->module);bad_fork_cleanup_put_domain:module_put(p->thread_info->exec_domain->module);bad_fork_cleanup_count:put_group_info(p->group_info);atomic_dec(&p->user->processes);free_uid(p->user);bad_fork_free:free_task(p);goto fork_out;}struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs){memset(regs, 0, sizeof(struct pt_regs));return regs;}task_t * __devinit fork_idle(int cpu){task_t *task;struct pt_regs regs;task = copy_process(CLONE_VM, 0, idle_regs(®s), 0, NULL, NULL, 0);if (!task)return ERR_PTR(-ENOMEM);init_idle(task, cpu);unhash_process(task);return task;}static inline int fork_traceflag (unsigned clone_flags){if (clone_flags & CLONE_UNTRACED)return 0;else if (clone_flags & CLONE_VFORK) {if (current->ptrace & PT_TRACE_VFORK)return PTRACE_EVENT_VFORK;} else if ((clone_flags & CSIGNAL) != SIGCHLD) {if (current->ptrace & PT_TRACE_CLONE)return PTRACE_EVENT_CLONE;} else if (current->ptrace & PT_TRACE_FORK)return PTRACE_EVENT_FORK;return 0;}


原创粉丝点击