MTK Kernel Boot Flow Source Code Analysis (3): init_task


http://blog.csdn.net/xichangbao/article/details/52859472

init_task is the kernel's very first process, process number 0. Once kernel initialization is complete, it becomes the idle process.
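As a rough sketch of how that happens (simplified from init/main.c of this kernel generation, not the complete function): start_kernel() ends in rest_init(), which forks PID 1 and PID 2 and then drops the boot task, init_task, into the idle loop.

static noinline void __init_refok rest_init(void)
{
    int pid;

    rcu_scheduler_starting();
    kernel_thread(kernel_init, NULL, CLONE_FS);                   // becomes PID 1 (init)
    pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);  // PID 2 (kthreadd)
    /* ... bookkeeping elided ... */
    init_idle_bootup_task(current);
    schedule_preempt_disabled();
    cpu_startup_entry(CPUHP_ONLINE);   // init_task (PID 0) loops here as the idle task
}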

1 init_thread_union

union thread_union init_thread_union __init_task_data =
    { INIT_THREAD_INFO(init_task) };

#define INIT_THREAD_INFO(tsk)                        \
{                                    \
    .task        = &tsk, // the process descriptor, init_task                       \
    .exec_domain    = &default_exec_domain, // the "Linux" execution domain               \
    .flags        = 0,                        \
    .preempt_count    = INIT_PREEMPT_COUNT,                \
    .addr_limit    = KERNEL_DS, // kernel address-space limit: KERNEL_DS = -1UL, i.e. the whole address space                 \
    .restart_block    = { // restartable-system-call state                       \
        .fn    = do_no_restart_syscall,            \
    },                                \
}

struct thread_info { // defined in kernel/arch/arm64/include/asm/thread_info.h
    unsigned long        flags;        /* low level flags */
    mm_segment_t        addr_limit;    /* address limit */
    struct task_struct    *task;        /* main task structure */ // the process descriptor
    struct exec_domain    *exec_domain;    /* execution domain */ // execution-domain descriptor
    struct restart_block    restart_block;
    int            preempt_count;    /* 0 => preemptable, <0 => bug */
    int            cpu;        /* cpu */
};

struct exec_domain default_exec_domain = { // the "Linux" execution domain
    .name       = "Linux",      /* name */
    .handler    = default_handler,  /* lcall7 causes a seg fault. */
    .pers_low   = 0,            /* PER_LINUX personality. */
    .pers_high  = 0,            /* PER_LINUX personality. */
    .signal_map = ident_map,        /* Identity map signals. */
    .signal_invmap  = ident_map,        /*  - both ways. */
};

#define INIT_PREEMPT_COUNT  (1 + PREEMPT_ACTIVE) // defined in include/linux/sched.h
#define PREEMPT_ACTIVE  0x40000000 // defined in arch/arm64/include/asm/thread_info.h
Setting preempt_count to 0x40000001 temporarily disables kernel preemption until the scheduler has been initialized.
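This works because the preemption entry points bail out whenever preempt_count() is non-zero; a minimal reference, based on the generic CONFIG_PREEMPT definition in include/linux/preempt.h:

#define preemptible()    (preempt_count() == 0 && !irqs_disabled())

/* preempt_schedule(), called from preempt_enable(), returns immediately when
 * !preemptible(), so a boot-time preempt_count of 0x40000001 keeps the
 * scheduler away until the count is reset once scheduling is up. */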

#define KERNEL_DS   (-1UL) // address limit covering the entire kernel address space (0xFFFFFFFFFFFFFFFF on arm64)
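A brief sketch of how addr_limit is consumed, simplified from the arm64 uaccess helpers of this era (arch/arm64/include/asm/uaccess.h): user-supplied addresses are checked against it, so KERNEL_DS effectively removes the check.

#define get_fs()    (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
    current_thread_info()->addr_limit = fs;  /* set_fs(KERNEL_DS) lets
                                              * copy_{to,from}_user() accept
                                              * kernel addresses */
}

/* access_ok(type, addr, size) ultimately reduces to
 * "does addr .. addr+size stay below addr_limit?" */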

2 init_task

2.1 INIT_TASK(tsk)
struct task_struct init_task = INIT_TASK(init_task);

#define INIT_TASK(tsk)    \
{                                    \
    .state        = 0, // process state: 0 (TASK_RUNNING) means runnable -- running or ready to run                       \
    .stack        = &init_thread_info, // points at init_thread_info               \
    .usage        = ATOMIC_INIT(2), // two references to this structure              \
    .flags        = PF_KTHREAD, // kernel thread                   \
    .prio        = MAX_PRIO-20, // default priority (120)                   \
    .static_prio    = MAX_PRIO-20,                    \
    .normal_prio    = MAX_PRIO-20,                    \
    .policy        = SCHED_NORMAL, // scheduled with the SCHED_NORMAL policy                   \
    .cpus_allowed    = CPU_MASK_ALL, // CPU affinity mask                   \
    .nr_cpus_allowed= NR_CPUS, // may run on every CPU                   \
    .mm        = NULL,                        \
    .active_mm    = &init_mm, // see 3.2                   \
    .se        = { // CFS scheduling entity                       \
        .group_node     = LIST_HEAD_INIT(tsk.se.group_node),    \
    },                                \
    .rt        = { // real-time scheduling entity                       \
        .run_list    = LIST_HEAD_INIT(tsk.rt.run_list),    \
        .time_slice    = RR_TIMESLICE,                \
    },                                \
    .tasks        = LIST_HEAD_INIT(tsk.tasks), // global task list linkage           \
    INIT_PUSHABLE_TASKS(tsk) // .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),                   \
    INIT_CGROUP_SCHED(tsk) // .sched_task_group = &root_task_group; root_task_group is initialized in sched_init()                       \
    .ptraced    = LIST_HEAD_INIT(tsk.ptraced),            \
    .ptrace_entry    = LIST_HEAD_INIT(tsk.ptrace_entry),        \
    .real_parent    = &tsk, // init_task is its own parent                       \
    .parent        = &tsk, // init_task is its own parent                       \
    .children    = LIST_HEAD_INIT(tsk.children), // children list           \
    .sibling    = LIST_HEAD_INIT(tsk.sibling), // sibling list           \
    .group_leader    = &tsk, // init_task is its own thread-group leader                       \
    RCU_POINTER_INITIALIZER(real_cred, &init_cred), // .p = (typeof(*v) __force __rcu *)(v)           \
    RCU_POINTER_INITIALIZER(cred, &init_cred), // RCU pointer to init_cred, see 3.3          \
    .comm        = INIT_TASK_COMM, // #define INIT_TASK_COMM "swapper" -- the process is named "swapper"              \
    .thread        = INIT_THREAD, // #define INIT_THREAD  {    }                   \
    .fs        = &init_fs, // see 3.4                   \
    .files        = &init_files, // see 3.5                   \
    .signal        = &init_signals, // see 3.6                \
    .sighand    = &init_sighand, // see 3.7               \
    .nsproxy    = &init_nsproxy, // see 3.8               \
    .pending    = { // list of pending signals                       \
        .list = LIST_HEAD_INIT(tsk.pending.list),        \
        .signal = {{0}}},                    \
    .blocked    = {{0}}, // blocked signal set                    \
    .alloc_lock    = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), // spinlock       \
    .journal_info    = NULL,                        \
    .cpu_timers    = INIT_CPU_TIMERS(tsk.cpu_timers), // CPU timers      \
    .pi_lock    = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),    \
    .timer_slack_ns = 50000, /* 50 usec default slack */ // 50 us of slack added when rounding up select()/poll() timeouts       \
    .pids = { // see 3.9                           \
        [PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID), // process PID       \
        [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), // process group ID       \
        [PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID), // session ID        \
    },                                \
    .thread_group    = LIST_HEAD_INIT(tsk.thread_group), // init_task's thread-group list       \
    .thread_node    = LIST_HEAD_INIT(init_signals.thread_head),    \
    INIT_IDS // loginuid, sessionid                           \
    INIT_PERF_EVENTS(tsk) // perf events (performance monitoring)                       \
    INIT_TRACE_IRQFLAGS // softirqs_enabled                        \
    INIT_LOCKDEP // lockdep, the kernel lock-dependency/deadlock detector                           \
    INIT_FTRACE_GRAPH // ftrace function-graph tracer                       \
    INIT_TRACE_RECURSION                        \
    INIT_TASK_RCU_PREEMPT(tsk) // RCU synchronization state                    \
    INIT_TASK_RCU_TASKS(tsk)                    \
    INIT_CPUSET_SEQ(tsk)                        \
    INIT_RT_MUTEXES(tsk) // rt-mutexes with priority inheritance (PI)                       \
    INIT_VTIME(tsk)                            \
    INIT_KASAN(tsk) // KASAN (Kernel Address Sanitizer), a dynamic detector of memory errors such as out-of-bounds accesses and use-after-free                           \
}

#define init_thread_info    (init_thread_union.thread_info)
#define init_stack        (init_thread_union.stack)
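For reference, union thread_union (include/linux/sched.h, with current_thread_info() from arch/arm64/include/asm/thread_info.h of this era) overlays thread_info on the bottom of the THREAD_SIZE kernel stack, which is how the kernel recovers it from the stack pointer; a simplified sketch:

union thread_union {
    struct thread_info thread_info;
    unsigned long stack[THREAD_SIZE/sizeof(long)];
};

static inline struct thread_info *current_thread_info(void)
{
    register unsigned long sp asm ("sp");

    /* thread_info lives at the base of the THREAD_SIZE-aligned stack block */
    return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}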




3 Structures referenced by init_task
This section only quotes the structure definitions; it does not explain their fields in detail.

3.1 task_struct

struct task_struct { // defined in kernel/include/linux/sched.h
    volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
    void *stack;
    atomic_t usage;
    unsigned int flags;    /* per process flags, defined below */
    unsigned int ptrace;

#ifdef CONFIG_SMP
    struct llist_node wake_entry;
    int on_cpu;
    struct task_struct *last_wakee;
    unsigned long wakee_flips;
    unsigned long wakee_flip_decay_ts;

    int wake_cpu;
#endif
    int on_rq;

    int prio, static_prio, normal_prio;
    unsigned int rt_priority;
    const struct sched_class *sched_class;
    struct sched_entity se;
    struct sched_rt_entity rt;
#ifdef CONFIG_SCHED_HMP
    struct ravg ravg;
    /*
     * 'init_load_pct' represents the initial task load assigned to children
     * of this task
     */
    u32 init_load_pct;
    u64 last_wake_ts;
    u64 last_switch_out_ts;
#ifdef CONFIG_SCHED_QHMP
    u64 run_start;
#endif
    struct related_thread_group *grp;
    struct list_head grp_list;
#endif
#ifdef CONFIG_CGROUP_SCHED
    struct task_group *sched_task_group;
#endif
    struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
    /* list of struct preempt_notifier: */
    struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
    unsigned int btrace_seq;
#endif

    unsigned int policy;
    int nr_cpus_allowed;
    cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
    int rcu_read_lock_nesting;
    union rcu_special rcu_read_unlock_special;
    struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
    struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
    unsigned long rcu_tasks_nvcsw;
    bool rcu_tasks_holdout;
    struct list_head rcu_tasks_holdout_list;
    int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
    struct sched_info sched_info;
#endif

    struct list_head tasks;
#ifdef CONFIG_SMP
    struct plist_node pushable_tasks;
    struct rb_node pushable_dl_tasks;
#endif

    struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
    unsigned brk_randomized:1;
#endif
    /* per-thread vma caching */
    u32 vmacache_seqnum;
    struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
    struct task_rss_stat    rss_stat;
#endif
/* task state */
    int exit_state;
    int exit_code, exit_signal;
    int pdeath_signal;  /*  The signal sent when the parent dies  */
    unsigned int jobctl;    /* JOBCTL_*, siglock protected */

    /* Used for emulating ABI behavior of previous Linux versions */
    unsigned int personality;

    unsigned in_execve:1;    /* Tell the LSMs that the process is doing an
                 * execve */
    unsigned in_iowait:1;

    /* Revert to default priority/policy when forking */
    unsigned sched_reset_on_fork:1;
    unsigned sched_contributes_to_load:1;

    unsigned long atomic_flags; /* Flags needing atomic access. */

    pid_t pid;
    pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
    /* Canary value for the -fstack-protector gcc feature */
    unsigned long stack_canary;
#endif
    /*
     * pointers to (original) parent process, youngest child, younger sibling,
     * older sibling, respectively.  (p->father can be replaced with
     * p->real_parent->pid)
     */
    struct task_struct __rcu *real_parent; /* real parent process */
    struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
    /*
     * children/sibling forms the list of my natural children
     */
    struct list_head children;    /* list of my children */
    struct list_head sibling;    /* linkage in my parent's children list */
    struct task_struct *group_leader;    /* threadgroup leader */

    /*
     * ptraced is the list of tasks this task is using ptrace on.
     * This includes both natural children and PTRACE_ATTACH targets.
     * p->ptrace_entry is p's link on the p->parent->ptraced list.
     */
    struct list_head ptraced;
    struct list_head ptrace_entry;

    /* PID/PID hash table linkage. */
    struct pid_link pids[PIDTYPE_MAX];
    struct list_head thread_group;
    struct list_head thread_node;

    struct completion *vfork_done;        /* for vfork() */
    int __user *set_child_tid;        /* CLONE_CHILD_SETTID */
    int __user *clear_child_tid;        /* CLONE_CHILD_CLEARTID */

    cputime_t utime, stime, utimescaled, stimescaled;
    cputime_t gtime;
    unsigned long long cpu_power;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
    struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
    seqlock_t vtime_seqlock;
    unsigned long long vtime_snap;
    enum {
        VTIME_SLEEPING = 0,
        VTIME_USER,
        VTIME_SYS,
    } vtime_snap_whence;
#endif
    unsigned long nvcsw, nivcsw; /* context switch counts */
    u64 start_time;        /* monotonic time in nsec */
    u64 real_start_time;    /* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
    unsigned long min_flt, maj_flt;

    struct task_cputime cputime_expires;
    struct list_head cpu_timers[3];

/* process credentials */
    const struct cred __rcu *real_cred; /* objective and real subjective task
                     * credentials (COW) */
    const struct cred __rcu *cred;    /* effective (overridable) subjective task
                     * credentials (COW) */
    char comm[TASK_COMM_LEN]; /* executable name excluding path
                     - access with [gs]et_task_comm (which lock
                       it with task_lock())
                     - initialized normally by setup_new_exec */
/* file system info */
    int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
    struct sysv_sem sysvsem;
    struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
    unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
    struct thread_struct thread;
/* filesystem information */
    struct fs_struct *fs;
/* open file information */
    struct files_struct *files;
/* namespaces */
    struct nsproxy *nsproxy;
/* signal handlers */
    struct signal_struct *signal;
    struct sighand_struct *sighand;

    sigset_t blocked, real_blocked;
    sigset_t saved_sigmask;    /* restored if set_restore_sigmask() was used */
    struct sigpending pending;

    unsigned long sas_ss_sp;
    size_t sas_ss_size;
    int (*notifier)(void *priv);
    void *notifier_data;
    sigset_t *notifier_mask;
    struct callback_head *task_works;

    struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
    kuid_t loginuid;
    unsigned int sessionid;
#endif
    struct seccomp seccomp;

/* Thread group tracking */
       u32 parent_exec_id;
       u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
    spinlock_t alloc_lock;

    /* Protection of the PI data structures: */
    raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
    /* PI waiters blocked on a rt_mutex held by this task */
    struct rb_root pi_waiters;
    struct rb_node *pi_waiters_leftmost;
    /* Deadlock detection and priority inheritance handling */
    struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
    /* mutex deadlock detection */
    struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
    unsigned int irq_events;
    unsigned long hardirq_enable_ip;
    unsigned long hardirq_disable_ip;
    unsigned int hardirq_enable_event;
    unsigned int hardirq_disable_event;
    int hardirqs_enabled;
    int hardirq_context;
    unsigned long softirq_disable_ip;
    unsigned long softirq_enable_ip;
    unsigned int softirq_disable_event;
    unsigned int softirq_enable_event;
    int softirqs_enabled;
    int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
    u64 curr_chain_key;
    int lockdep_depth;
    unsigned int lockdep_recursion;
    struct held_lock held_locks[MAX_LOCK_DEPTH];
    gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
    void *journal_info;

/* stacked block device info */
    struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
    struct blk_plug *plug;
#endif

/* VM state */
    struct reclaim_state *reclaim_state;

    struct backing_dev_info *backing_dev_info;

    struct io_context *io_context;

    unsigned long ptrace_message;
    siginfo_t *last_siginfo; /* For ptrace use.  */
    struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
    u64 acct_rss_mem1;    /* accumulated rss usage */
    u64 acct_vm_mem1;    /* accumulated virtual memory usage */
    cputime_t acct_timexpd;    /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
    nodemask_t mems_allowed;    /* Protected by alloc_lock */
    seqcount_t mems_allowed_seq;    /* Seqence no to catch updates */
    int cpuset_mem_spread_rotor;
    int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
    /* Control Group info protected by css_set_lock */
    struct css_set __rcu *cgroups;
    /* cg_list protected by css_set_lock and tsk->alloc_lock */
    struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
    struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
    struct compat_robust_list_head __user *compat_robust_list;
#endif
    struct list_head pi_state_list;
    struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
    struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
    struct mutex perf_event_mutex;
    struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
    unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
    struct mempolicy *mempolicy;    /* Protected by alloc_lock */
    short il_next;
    short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
    int numa_scan_seq;
    unsigned int numa_scan_period;
    unsigned int numa_scan_period_max;
    int numa_preferred_nid;
    unsigned long numa_migrate_retry;
    u64 node_stamp;            /* migration stamp  */
    u64 last_task_numa_placement;
    u64 last_sum_exec_runtime;
    struct callback_head numa_work;

    struct list_head numa_entry;
    struct numa_group *numa_group;

    /*
     * Exponential decaying average of faults on a per-node basis.
     * Scheduling placement decisions are made based on the these counts.
     * The values remain static for the duration of a PTE scan
     */
    unsigned long *numa_faults_memory;
    unsigned long total_numa_faults;

    /*
     * numa_faults_buffer records faults per node during the current
     * scan window. When the scan completes, the counts in
     * numa_faults_memory decay and these values are copied.
     */
    unsigned long *numa_faults_buffer_memory;

    /*
     * Track the nodes the process was running on when a NUMA hinting
     * fault was incurred.
     */
    unsigned long *numa_faults_cpu;
    unsigned long *numa_faults_buffer_cpu;

    /*
     * numa_faults_locality tracks if faults recorded during the last
     * scan window were remote/local. The task scan period is adapted
     * based on the locality of the faults with different weights
     * depending on whether they were shared or private faults
     */
    unsigned long numa_faults_locality[2];

    unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

    struct rcu_head rcu;

    /*
     * cache last used pipe for splice
     */
    struct pipe_inode_info *splice_pipe;

    struct page_frag task_frag;

#ifdef    CONFIG_TASK_DELAY_ACCT
    struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
    int make_it_fail;
#endif
    /*
     * when (nr_dirtied >= nr_dirtied_pause), it's time to call
     * balance_dirty_pages() for some dirty throttling pause
     */
    int nr_dirtied;
    int nr_dirtied_pause;
    unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
    int latency_record_count;
    struct latency_record latency_record[LT_SAVECOUNT];
#endif
    /*
     * time slack values; these are used to round up poll() and
     * select() etc timeout values. These are in nanoseconds.
     */
    unsigned long timer_slack_ns;
    unsigned long default_timer_slack_ns;

#ifdef CONFIG_KASAN
    unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    /* Index of current stored address in ret_stack */
    int curr_ret_stack;
    /* Stack of return addresses for return function tracing */
    struct ftrace_ret_stack    *ret_stack;
    /* time stamp for last schedule */
    unsigned long long ftrace_timestamp;
    /*
     * Number of functions that haven't been traced
     * because of depth overrun.
     */
    atomic_t trace_overrun;
    /* Pause for the tracing */
    atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
    /* state flags for use by tracers */
    unsigned long trace;
    /* bitmask and counter of trace recursion */
    unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
    unsigned int memcg_kmem_skip_account;
    struct memcg_oom_info {
        struct mem_cgroup *memcg;
        gfp_t gfp_mask;
        int order;
        unsigned int may_oom:1;
    } memcg_oom;
#endif
#ifdef CONFIG_UPROBES
    struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
    unsigned int    sequential_io;
    unsigned int    sequential_io_avg;
#endif
};
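As a usage note: the .tasks list_head initialized in INIT_TASK links every task_struct into one global list rooted at init_task, which include/linux/sched.h walks with for_each_process(). A minimal sketch (dump_all_tasks() is a hypothetical helper; the macros are the real definitions, slightly reformatted):

#define next_task(p) \
    list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
    for (p = &init_task ; (p = next_task(p)) != &init_task ; )

static void dump_all_tasks(void)
{
    struct task_struct *p;

    rcu_read_lock();        /* the task list is RCU-protected */
    for_each_process(p)
        pr_info("pid=%d comm=%s\n", task_pid_nr(p), p->comm);
    rcu_read_unlock();
}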



3.2 init_mm

The kernel's memory descriptor:
struct mm_struct init_mm = {
    .mm_rb      = RB_ROOT,
    .pgd        = swapper_pg_dir,
    .mm_users   = ATOMIC_INIT(2),
    .mm_count   = ATOMIC_INIT(1),
    .mmap_sem   = __RWSEM_INITIALIZER(init_mm.mmap_sem),
    .page_table_lock =  __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
    .mmlist     = LIST_HEAD_INIT(init_mm.mmlist),
    INIT_MM_CONTEXT(init_mm)
};
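This is also why INIT_TASK pairs .mm = NULL with .active_mm = &init_mm: kernel threads own no user address space, so the scheduler lends them the previous task's active_mm. A simplified sketch of the relevant branch of context_switch() in kernel/sched/core.c (the rest of the function is elided):

    struct mm_struct *mm = next->mm;
    struct mm_struct *oldmm = prev->active_mm;

    if (!mm) {                          /* kernel thread: borrow oldmm */
        next->active_mm = oldmm;
        atomic_inc(&oldmm->mm_count);
        enter_lazy_tlb(oldmm, next);
    } else
        switch_mm(oldmm, mm, next);     /* user task: switch page tables */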

struct mm_struct {
    struct vm_area_struct *mmap;        /* list of VMAs */
    struct rb_root mm_rb;
    u32 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
    unsigned long (*get_unmapped_area) (struct file *filp,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags);
#endif
    unsigned long mmap_base;        /* base of mmap area */
    unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
    unsigned long task_size;        /* size of task vm space */
    unsigned long highest_vm_end;        /* highest vma end address */
    pgd_t * pgd;
    atomic_t mm_users;            /* How many users with user space? */
    atomic_t mm_count;            /* How many references to "struct mm_struct" (users count as 1) */
    atomic_long_t nr_ptes;            /* Page table pages */
    int map_count;                /* number of VMAs */

    spinlock_t page_table_lock;        /* Protects page tables and some counters */
    struct rw_semaphore mmap_sem;

    struct list_head mmlist;        /* List of maybe swapped mm's.    These are globally strung
                         * together off init_mm.mmlist, and are protected
                         * by mmlist_lock
                         */


    unsigned long hiwater_rss;    /* High-watermark of RSS usage */
    unsigned long hiwater_vm;    /* High-water virtual memory usage */

    unsigned long total_vm;        /* Total pages mapped */
    unsigned long locked_vm;    /* Pages that have PG_mlocked set */
    unsigned long pinned_vm;    /* Refcount permanently increased */
    unsigned long shared_vm;    /* Shared pages (files) */
    unsigned long exec_vm;        /* VM_EXEC & ~VM_WRITE */
    unsigned long stack_vm;        /* VM_GROWSUP/DOWN */
    unsigned long def_flags;
    unsigned long start_code, end_code, start_data, end_data;
    unsigned long start_brk, brk, start_stack;
    unsigned long arg_start, arg_end, env_start, env_end;

    unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

    /*
     * Special counters, in some configurations protected by the
     * page_table_lock, in other configurations by being atomic.
     */
    struct mm_rss_stat rss_stat;

    struct linux_binfmt *binfmt;

    cpumask_var_t cpu_vm_mask_var;

    /* Architecture-specific MM context */
    mm_context_t context;

    unsigned long flags; /* Must use atomic bitops to access the bits */

    struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
    spinlock_t            ioctx_lock;
    struct kioctx_table __rcu    *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
    /*
     * "owner" points to a task that is regarded as the canonical
     * user/owner of this mm. All of the following must be true in
     * order for it to be changed:
     *
     * current == mm->owner
     * current->mm != mm
     * new_owner->mm == mm
     * new_owner->alloc_lock is held
     */
    struct task_struct __rcu *owner;
#endif

    /* store ref to file /proc/<pid>/exe symlink points to */
    struct file *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
    struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
    pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
    struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
    /*
     * numa_next_scan is the next time that the PTEs will be marked
     * pte_numa. NUMA hinting faults will gather statistics and migrate
     * pages to new nodes if necessary.
     */
    unsigned long numa_next_scan;

    /* Restart point for scanning and setting pte_numa */
    unsigned long numa_scan_offset;

    /* numa_scan_seq prevents two threads setting pte_numa */
    int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
    /*
     * An operation with batched TLB flushing is going on. Anything that
     * can move process memory needs to flush the TLB when moving a
     * PROT_NONE or PROT_NUMA mapped page.
     */
    bool tlb_flush_pending;
#endif
    struct uprobes_state uprobes_state;
#ifdef CONFIG_MSM_APP_SETTINGS
    int app_setting;
#endif

};


3.3 init_cred

struct cred init_cred = {
    .usage          = ATOMIC_INIT(4),
#ifdef CONFIG_DEBUG_CREDENTIALS
    .subscribers        = ATOMIC_INIT(2),
    .magic          = CRED_MAGIC,
#endif
    .uid            = GLOBAL_ROOT_UID,
    .gid            = GLOBAL_ROOT_GID,
    .suid           = GLOBAL_ROOT_UID,
    .sgid           = GLOBAL_ROOT_GID,
    .euid           = GLOBAL_ROOT_UID,
    .egid           = GLOBAL_ROOT_GID,
    .fsuid          = GLOBAL_ROOT_UID,
    .fsgid          = GLOBAL_ROOT_GID,
    .securebits     = SECUREBITS_DEFAULT,
    .cap_inheritable    = CAP_EMPTY_SET,
    .cap_permitted      = CAP_FULL_SET,
    .cap_effective      = CAP_FULL_SET,
    .cap_bset       = CAP_FULL_SET,
    .user           = INIT_USER,
    .user_ns        = &init_user_ns,
    .group_info     = &init_groups,
};
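init_cred gives PID 0 the root UID/GID and a full capability set. A minimal sketch of how such credentials are typically inspected (current_is_global_root() is a hypothetical helper; current_cred(), uid_eq(), GLOBAL_ROOT_UID and cap_raised() are real helpers from include/linux/cred.h and include/linux/capability.h):

static bool current_is_global_root(void)
{
    const struct cred *cred = current_cred();   /* RCU-managed, valid for current */

    return uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
           cap_raised(cred->cap_effective, CAP_SYS_ADMIN);
}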

3.4 init_fs

struct fs_struct init_fs = {
    .users        = 1,
    .lock        = __SPIN_LOCK_UNLOCKED(init_fs.lock),
    .seq        = SEQCNT_ZERO(init_fs.seq),
    .umask        = 0022,
};


3.5 init_files

struct files_struct init_files = {
    .count        = ATOMIC_INIT(1),
    .fdt        = &init_files.fdtab,
    .fdtab        = {
        .max_fds    = NR_OPEN_DEFAULT,
        .fd        = &init_files.fd_array[0],
        .close_on_exec    = init_files.close_on_exec_init,
        .open_fds    = init_files.open_fds_init,
    },
    .file_lock    = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};
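For reference, init_files.fdt points at the embedded fdtab, and an fd number resolves through it roughly as below (simplified from fcheck_files() in include/linux/fdtable.h; lookup_fd() is a hypothetical name, and the real code adds RCU/lockdep annotations):

static struct file *lookup_fd(struct files_struct *files, unsigned int fd)
{
    struct fdtable *fdt = files_fdtable(files);   /* RCU-dereferences files->fdt */

    if (fd < fdt->max_fds)
        return rcu_dereference_raw(fdt->fd[fd]);
    return NULL;
}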


3.6 init_signals

static struct signal_struct init_signals = INIT_SIGNALS(init_signals);

#define INIT_SIGNALS(sig) {                        \
    .nr_threads    = 1,                        \
    .thread_head    = LIST_HEAD_INIT(init_task.thread_node),    \
    .wait_chldexit    = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
    .shared_pending    = {                         \
        .list = LIST_HEAD_INIT(sig.shared_pending.list),    \
        .signal =  {{0}}},                    \
    .posix_timers     = LIST_HEAD_INIT(sig.posix_timers),        \
    .cpu_timers    = INIT_CPU_TIMERS(sig.cpu_timers),        \
    .rlim        = INIT_RLIMITS,                    \
    .cputimer    = {                         \
        .cputime = INIT_CPUTIME,                \
        .running = 0,                        \
        .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock),    \
    },                                \
    .cred_guard_mutex =                        \
         __MUTEX_INITIALIZER(sig.cred_guard_mutex),        \
    INIT_GROUP_RWSEM(sig)                        \
}

3.7 init_sighand

static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

#define INIT_SIGHAND(sighand) {                        \
    .count        = ATOMIC_INIT(1),                 \
    .action        = { { { .sa_handler = SIG_DFL, } }, },        \
    .siglock    = __SPIN_LOCK_UNLOCKED(sighand.siglock),    \
    .signalfd_wqh    = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh),    \
}

3.8 init_nsproxy

struct nsproxy init_nsproxy = {
    .count            = ATOMIC_INIT(1),
    .uts_ns            = &init_uts_ns, // UTS namespace: kernel name, release, version, machine architecture, etc.
#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC) // IPC (inter-process communication) namespace
    .ipc_ns            = &init_ipc_ns,
#endif
    .mnt_ns            = NULL, // mount namespace
    .pid_ns_for_children    = &init_pid_ns, // PID namespace
#ifdef CONFIG_NET
    .net_ns            = &init_net, // network namespace
#endif
#endif
};
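The namespaces reached through nsproxy are what the corresponding syscalls consult; for example, uname(2) reads the UTS namespace via utsname(), which in include/linux/utsname.h is essentially:

static inline struct new_utsname *utsname(void)
{
    return &current->nsproxy->uts_ns->name;   /* for init_task: init_uts_ns.name */
}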

struct uts_namespace init_uts_ns = {
    .kref = {
        .refcount    = ATOMIC_INIT(2),
    },
    .name = {
        .sysname    = UTS_SYSNAME,
        .nodename    = UTS_NODENAME,
        .release    = UTS_RELEASE,
        .version    = UTS_VERSION,
        .machine    = UTS_MACHINE,
        .domainname    = UTS_DOMAINNAME,
    },
    .user_ns = &init_user_ns,
    .proc_inum = PROC_UTS_INIT_INO,
};

struct ipc_namespace init_ipc_ns = {
    .count        = ATOMIC_INIT(1),
    .user_ns = &init_user_ns,
    .proc_inum = PROC_IPC_INIT_INO,
};

struct pid_namespace init_pid_ns = {
    .kref = {
        .refcount       = ATOMIC_INIT(2),
    },
    .pidmap = {
        [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
    },
    .last_pid = 0,
    .nr_hashed = PIDNS_HASH_ADDING,
    .level = 0,
    .child_reaper = &init_task,
    .user_ns = &init_user_ns,
    .proc_inum = PROC_PID_INIT_INO,
};

struct net init_net = {
    .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};

3.9 INIT_PID_LINK

#define INIT_PID_LINK(type)                     \
{                                \
    .node = {                        \
        .next = NULL,                    \
        .pprev = NULL,                    \
    },                            \
    .pid = &init_struct_pid,                \
}

struct pid init_struct_pid = INIT_STRUCT_PID;

#define INIT_STRUCT_PID {                        \
    .count         = ATOMIC_INIT(1),                \
    .tasks        = {                        \
        { .first = NULL },                    \
        { .first = NULL },                    \
        { .first = NULL },                    \
    },                                \
    .level        = 0,                        \
    .numbers    = { {                        \
        .nr        = 0,                    \
        .ns        = &init_pid_ns,                \
        .pid_chain    = { .next = NULL, .pprev = NULL },    \
    }, }                                \
}
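These links are read back through small helpers in include/linux/sched.h and include/linux/pid.h; simplified sketches:

static inline struct pid *task_pid(struct task_struct *task)
{
    return task->pids[PIDTYPE_PID].pid;     /* &init_struct_pid for init_task */
}

static inline pid_t pid_nr(struct pid *pid)
{
    return pid ? pid->numbers[0].nr : 0;    /* global PID number: 0 for init_task */
}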


