thread_info


Linux describes a process/thread with struct task_struct, which holds information such as its memory mappings, open file descriptors, and process/thread state. The structure as a whole is very large, as shown below (abridged; the full definition is in include/linux/sched.h):

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
#endif
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long			state;
	void				*stack;
	atomic_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

#ifdef CONFIG_SMP
	struct llist_node		wake_entry;
	int				on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int			cpu;
#endif
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;
	int				wake_cpu;
#endif
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif
	struct sched_dl_entity		dl;

	/* ... preempt-notifier, RCU and scheduler bookkeeping fields ... */

	unsigned int			policy;
	int				nr_cpus_allowed;
	cpumask_t			cpus_allowed;

	struct sched_info		sched_info;

	struct list_head		tasks;

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	/* Per-thread vma caching: */
	struct vmacache			vmacache;

	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* ... scheduler/execve/iowait bitfields and atomic_flags ... */

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

	/*
	 * Pointers to the (original) parent process, youngest child,
	 * younger sibling, older sibling, respectively.
	 * (p->father can be replaced with p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link			pids[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user			*set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user			*clear_child_tid;

	u64				utime;
	u64				stime;
	u64				gtime;
	struct prev_cputime		prev_cputime;

	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				real_start_time;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];

	struct nameidata		*nameidata;

	/* Filesystem information: */
	struct fs_struct		*fs;

	/* Open file information: */
	struct files_struct		*files;

	/* Namespaces: */
	struct nsproxy			*nsproxy;

	/* Signal handlers: */
	struct signal_struct		*signal;
	struct sighand_struct		*sighand;
	sigset_t			blocked;
	sigset_t			real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

	/* Thread group tracking: */
	u32				parent_exec_id;
	u32				self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t			alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t			pi_lock;

	struct wake_q_node		wake_q;

	/*
	 * ... the remaining fields (rt-mutex PI, lockdep, block I/O,
	 * cpuset, cgroup, futex, perf, NUMA balancing, tracing,
	 * KASAN/KCOV, memcg, security and other config-dependent members)
	 * are elided here; see include/linux/sched.h for the full list ...
	 */

#ifdef CONFIG_VMAP_STACK
	struct vm_struct		*stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference: */
	atomic_t			stack_refcount;
#endif

	/* CPU-specific state of this task: */
	struct thread_struct		thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure.  It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};
In kernels before 2.6, task_struct was allocated statically. From 2.6 onward, so that task_struct control blocks can be reused and benefit from cache coloring, the memory for task_struct is allocated from the slab allocator (SLUB).
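As a reference point, here is a minimal sketch of how kernel/fork.c sets up and uses a dedicated slab cache for task_struct; the exact flags, alignment macro and helper names differ between kernel versions, so treat this as illustrative rather than a verbatim copy:

	/* Sketch modeled on kernel/fork.c (details vary by version). */
	static struct kmem_cache *task_struct_cachep;

	void __init fork_init(void)
	{
		/* One dedicated cache so freed task_structs can be reused quickly. */
		task_struct_cachep = kmem_cache_create("task_struct",
						       sizeof(struct task_struct),
						       ARCH_MIN_TASKALIGN,
						       SLAB_PANIC | SLAB_ACCOUNT,
						       NULL);
	}

	static struct task_struct *alloc_task_struct_node(int node)
	{
		/* fork()/clone() obtain each new task_struct from the cache. */
		return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
	}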

On processors with relatively few registers (such as x86), so that the process descriptor (task_struct) can be reached without dedicating a register to it, a thread_info structure is placed at the bottom of the kernel stack, and the pointer to the task_struct is stored at the first offset of thread_info. The kernel can then reach the task_struct through the kernel stack alone, as sketched below.
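A simplified sketch of this classic layout (before CONFIG_THREAD_INFO_IN_TASK moved thread_info into task_struct). The exact fields and helpers are architecture-specific; this follows the old x86/ARM style, with current_thread_info() derived by masking the stack pointer:

	/* thread_info sits at the bottom of the kernel stack; its first
	 * field points back at the owning task_struct. */
	struct thread_info {
		struct task_struct	*task;		/* main task structure */
		unsigned long		flags;		/* low-level flags */
		__u32			cpu;		/* current CPU */
		int			preempt_count;
		/* ... further arch-specific fields ... */
	};

	/*
	 * The kernel stack is THREAD_SIZE-aligned, so masking the current
	 * stack pointer yields the stack base, i.e. the thread_info.
	 * 'current_stack_pointer' stands for the arch-provided SP register.
	 */
	static inline struct thread_info *current_thread_info(void)
	{
		return (struct thread_info *)
			(current_stack_pointer & ~(THREAD_SIZE - 1));
	}

	/* 'current' (the running task) is then one more dereference away: */
	#define get_current()	(current_thread_info()->task)
	#define current		get_current()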

Kernel stack size: commonly quoted as 4 KB on 32-bit systems and 8 KB on 64-bit systems; the actual THREAD_SIZE is architecture- and configuration-dependent.
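For completeness, a sketch of the classic combined allocation, where the kernel stack and thread_info share a single THREAD_SIZE block (union thread_union is the real kernel type; the sizes below simply follow the figures quoted above, whereas real kernels set THREAD_SIZE per architecture):

	#define PAGE_SIZE	4096UL
	#ifdef CONFIG_64BIT
	# define THREAD_SIZE	(2 * PAGE_SIZE)		/* 8 KB, as quoted above */
	#else
	# define THREAD_SIZE	(1 * PAGE_SIZE)		/* 4 KB, as quoted above */
	#endif

	union thread_union {
		struct thread_info	thread_info;	/* at the lowest addresses */
		unsigned long		stack[THREAD_SIZE / sizeof(unsigned long)];
	};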

