/linux-3.10.1/include/linux/sched.h

#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constant used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);

extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

/* Notifier for when a task gets migrated to a new CPU */
struct task_migration_notifier {
	struct task_struct *task;
	int from_cpu;
	int to_cpu;
};
extern void register_task_migration_notifier(struct notifier_block *n);

extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif
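/*
 * Editor's note: the block below is an illustrative sketch, not part of
 * sched.h.  It shows how the fixed-point load averages exported above are
 * typically consumed: get_avenrun() copies avenrun[] with a rounding
 * offset, and the FSHIFT/FIXED_1 scaling is then undone to print "X.YY"
 * values (fs/proc/loadavg.c does essentially this).  The LOAD_INT()/
 * LOAD_FRAC() helpers and example_print_loadavg() are defined here only
 * for the example.
 */
#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

static void example_print_loadavg(void)
{
	unsigned long loads[3];

	/* the FIXED_1/200 offset rounds to the nearest 1/100th */
	get_avenrun(loads, FIXED_1 / 200, 0);

	printk(KERN_INFO "load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
	       LOAD_INT(loads[0]), LOAD_FRAC(loads[0]),
	       LOAD_INT(loads[1]), LOAD_FRAC(loads[1]),
	       LOAD_INT(loads[2]), LOAD_FRAC(loads[2]));
}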
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_STATE_MAX		1024

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

extern char ___assert_task_state[1 - 2*!!(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task)	((task)->exit_state != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif
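/*
 * Editor's note: illustrative sketch, not part of sched.h.  The canonical
 * sleep/wakeup pattern built on the state macros above looks like this.
 * set_current_state() (rather than __set_current_state()) is used before
 * testing the condition so the state write is ordered against a concurrent
 * wake_up_process(), exactly as the comment above describes.  The function
 * name and the condition pointer are made up for the example; a real
 * TASK_INTERRUPTIBLE sleeper would normally also check signal_pending().
 */
static void example_wait_for_event(int *event_happened)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (*event_happened)
			break;
		schedule();	/* woken by wake_up_process() on the waiter */
	}
	__set_current_state(TASK_RUNNING);
}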
/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
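/*
 * Editor's note: illustrative sketch, not part of sched.h.  The
 * schedule_timeout() family declared above sleeps for a given number of
 * jiffies; the _interruptible/_killable/_uninterruptible wrappers set the
 * corresponding task state first.  example_sleep_100ms() is made up for
 * the example; msecs_to_jiffies() comes from <linux/jiffies.h>, which is
 * already included at the top of this header.
 */
static long example_sleep_100ms(void)
{
	/* returns 0 if the full timeout elapsed, or the remaining jiffies
	 * if the task was woken early (e.g. by a signal). */
	return schedule_timeout_interruptible(msecs_to_jiffies(100));
}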
struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct cputime - snaphsot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 *
 * Gathers a generic snapshot of user and system time.
 */
struct cputime {
	cputime_t utime;
	cputime_t stime;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This is an extension of struct cputime that includes the total runtime
 * spent by the task from the scheduler point of view.
 *
 * As a result, this structure groups together three kinds of CPU time
 * that are tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
*/intnotify_count;struct task_struct*group_exit_task;/* thread group stop support, overloads group_exit_code too */intgroup_stop_count;unsigned intflags; /* see SIGNAL_* flags below *//* * PR_SET_CHILD_SUBREAPER marks a process, like a service * manager, to re-parent orphan (double-forking) child processes * to this process instead of 'init'. The service manager is * able to receive SIGCHLD signals and is able to investigate * the process until it calls wait(). All children of this * process will inherit a flag if they should look for a * child_subreaper process at exit. */unsigned intis_child_subreaper:1;unsigned inthas_child_subreaper:1;/* POSIX.1b Interval Timers */intposix_timer_id;struct list_headposix_timers;/* ITIMER_REAL timer for the process */struct hrtimer real_timer;struct pid *leader_pid;ktime_t it_real_incr;/* * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these * values are defined to 0 and 1 respectively */struct cpu_itimer it[2];/* * Thread group totals for process CPU timers. * See thread_group_cputimer(), et al, for details. */struct thread_group_cputimer cputimer;/* Earliest-expiration cache. */struct task_cputime cputime_expires;struct list_head cpu_timers[3];struct pid *tty_old_pgrp;/* boolean value for session group leader */int leader;struct tty_struct *tty; /* NULL if no tty */#ifdef CONFIG_SCHED_AUTOGROUPstruct autogroup *autogroup;#endif/* * Cumulative resource counters for dead threads in the group, * and for reaped dead child processes forked by this group. * Live threads maintain their own counters and add to these * in __exit_signal, except for the group leader. */cputime_t utime, stime, cutime, cstime;cputime_t gtime;cputime_t cgtime;#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVEstruct cputime prev_cputime;#endifunsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;unsigned long inblock, oublock, cinblock, coublock;unsigned long maxrss, cmaxrss;struct task_io_accounting ioac;/* * Cumulative ns of schedule CPU time fo dead threads in the * group, not including a zombie group leader, (This only differs * from jiffies_to_ns(utime + stime) if sched_clock uses something * other than jiffies.) */unsigned long long sum_sched_runtime;/* * We don't bother to synchronize most readers of this at all, * because there is no reader checking a limit that actually needs * to get both rlim_cur and rlim_max atomically, and either one * alone is a single word that can safely be read normally. * getrlimit/setrlimit use task_lock(current->group_leader) to * protect this instead of the siglock, because they really * have no need to disable irqs. */struct rlimit rlim[RLIM_NLIMITS];#ifdef CONFIG_BSD_PROCESS_ACCTstruct pacct_struct pacct;/* per-process accounting information */#endif#ifdef CONFIG_TASKSTATSstruct taskstats *stats;#endif#ifdef CONFIG_AUDITunsigned audit_tty;unsigned audit_tty_log_passwd;struct tty_audit_buf *tty_audit_buf;#endif#ifdef CONFIG_CGROUPS/* * group_rwsem prevents new tasks from entering the threadgroup and * member tasks from exiting,a more specifically, setting of * PF_EXITING.  fork and exit paths are protected with this rwsem * using threadgroup_change_begin/end().  Users which require * threadgroup to remain stable should use threadgroup_[un]lock() * which also takes care of exec path.  Currently, cgroup is the * only user. 
*/struct rw_semaphore group_rwsem;#endifoom_flags_t oom_flags;short oom_score_adj;/* OOM kill score adjustment */short oom_score_adj_min;/* OOM kill score adjustment min value. * Only settable by CAP_SYS_RESOURCE. */struct mutex cred_guard_mutex;/* guard against foreign influences on * credential calculations * (notably. ptrace) */};/* * Bits in flags field of signal_struct. */#define SIGNAL_STOP_STOPPED0x00000001 /* job control stop in effect */#define SIGNAL_STOP_CONTINUED0x00000002 /* SIGCONT since WCONTINUED reap */#define SIGNAL_GROUP_EXIT0x00000004 /* group exit in progress */#define SIGNAL_GROUP_COREDUMP0x00000008 /* coredump in progress *//* * Pending notifications to parent. */#define SIGNAL_CLD_STOPPED0x00000010#define SIGNAL_CLD_CONTINUED0x00000020#define SIGNAL_CLD_MASK(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)#define SIGNAL_UNKILLABLE0x00000040 /* for init: ignore fatal signals *//* If true, all threads except ->group_exit_task have pending SIGKILL */static inline int signal_group_exit(const struct signal_struct *sig){return(sig->flags & SIGNAL_GROUP_EXIT) ||(sig->group_exit_task != NULL);}/* * Some day this will be a full-fledged user tracking system.. */struct user_struct {atomic_t __count;/* reference count */atomic_t processes;/* How many processes does this user have? */atomic_t files;/* How many open files does this user have? */atomic_t sigpending;/* How many pending signals does this user have? */#ifdef CONFIG_INOTIFY_USERatomic_t inotify_watches; /* How many inotify watches does this user have? */atomic_t inotify_devs;/* How many inotify devs does this user have opened? */#endif#ifdef CONFIG_FANOTIFYatomic_t fanotify_listeners;#endif#ifdef CONFIG_EPOLLatomic_long_t epoll_watches; /* The number of file descriptors currently watched */#endif#ifdef CONFIG_POSIX_MQUEUE/* protected by mq_lock*/unsigned long mq_bytes;/* How many bytes can be allocated to mqueue? */#endifunsigned long locked_shm; /* How many pages of mlocked shm ? */#ifdef CONFIG_KEYSstruct key *uid_keyring;/* UID specific keyring */struct key *session_keyring;/* UID's default session keyring */#endif/* Hash table maintenance information */struct hlist_node uidhash_node;kuid_t uid;#ifdef CONFIG_PERF_EVENTSatomic_long_t locked_vm;#endif};extern int uids_sysfs_init(void);extern struct user_struct *find_user(kuid_t);extern struct user_struct root_user;#define INIT_USER (&root_user)struct backing_dev_info;struct reclaim_state;#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)struct sched_info {/* cumulative counters */unsigned long pcount;      /* # of times run on this cpu */unsigned long long run_delay; /* time spent waiting on a runqueue *//* timestamps */unsigned long long last_arrival,/* when we last ran on a cpu */   last_queued;/* when we were last queued to run */};#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */#ifdef CONFIG_TASK_DELAY_ACCTstruct task_delay_info {spinlock_tlock;unsigned intflags;/* Private per-task flags *//* For each stat XXX, add following, aligned appropriately * * struct timespec XXX_start, XXX_end; * u64 XXX_delay; * u32 XXX_count; * * Atomicity of updates to XXX_delay, XXX_count protected by * single lock above (split into XXX_lock if contention is an issue). *//* * XXX_count is incremented on every XXX operation, the delay * associated with the operation is added to XXX_delay. * XXX_delay contains the accumulated delay time in nanoseconds. 
*/struct timespec blkio_start, blkio_end;/* Shared by blkio, swapin */u64 blkio_delay;/* wait for sync block io completion */u64 swapin_delay;/* wait for swapin block io completion */u32 blkio_count;/* total count of the number of sync block *//* io operations performed */u32 swapin_count;/* total count of the number of swapin block *//* io operations performed */struct timespec freepages_start, freepages_end;u64 freepages_delay;/* wait for memory reclaim */u32 freepages_count;/* total count of memory reclaim */};#endif/* CONFIG_TASK_DELAY_ACCT */static inline int sched_info_on(void){#ifdef CONFIG_SCHEDSTATSreturn 1;#elif defined(CONFIG_TASK_DELAY_ACCT)extern int delayacct_on;return delayacct_on;#elsereturn 0;#endif}enum cpu_idle_type {CPU_IDLE,CPU_NOT_IDLE,CPU_NEWLY_IDLE,CPU_MAX_IDLE_TYPES};/* * Increase resolution of cpu_power calculations */#define SCHED_POWER_SHIFT10#define SCHED_POWER_SCALE(1L << SCHED_POWER_SHIFT)/* * sched-domains (multiprocessor balancing) declarations: */#ifdef CONFIG_SMP#define SD_LOAD_BALANCE0x0001/* Do load balancing on this domain. */#define SD_BALANCE_NEWIDLE0x0002/* Balance when about to become idle */#define SD_BALANCE_EXEC0x0004/* Balance on exec */#define SD_BALANCE_FORK0x0008/* Balance on fork, clone */#define SD_BALANCE_WAKE0x0010  /* Balance on wakeup */#define SD_WAKE_AFFINE0x0020/* Wake task to waking CPU */#define SD_SHARE_CPUPOWER0x0080/* Domain members share cpu power */#define SD_SHARE_PKG_RESOURCES0x0200/* Domain members share cpu pkg resources */#define SD_SERIALIZE0x0400/* Only a single load balancing instance */#define SD_ASYM_PACKING0x0800  /* Place busy groups earlier in the domain */#define SD_PREFER_SIBLING0x1000/* Prefer to place tasks in a sibling domain */#define SD_OVERLAP0x2000/* sched_domains of this level overlap */extern int __weak arch_sd_sibiling_asym_packing(void);struct sched_domain_attr {int relax_domain_level;};#define SD_ATTR_INIT(struct sched_domain_attr) {\.relax_domain_level = -1,\}extern int sched_domain_level_max;struct sched_group;struct sched_domain {/* These fields must be setup */struct sched_domain *parent;/* top domain must be null terminated */struct sched_domain *child;/* bottom domain must be null terminated */struct sched_group *groups;/* the balancing groups of the domain */unsigned long min_interval;/* Minimum balance interval ms */unsigned long max_interval;/* Maximum balance interval ms */unsigned int busy_factor;/* less balancing by factor if busy */unsigned int imbalance_pct;/* No balance until over watermark */unsigned int cache_nice_tries;/* Leave cache hot tasks for # tries */unsigned int busy_idx;unsigned int idle_idx;unsigned int newidle_idx;unsigned int wake_idx;unsigned int forkexec_idx;unsigned int smt_gain;int nohz_idle;/* NOHZ IDLE status */int flags;/* See SD_* */int level;/* Runtime fields. */unsigned long last_balance;/* init to jiffies. units in jiffies */unsigned int balance_interval;/* initialise to 1. units in ms. 
*/unsigned int nr_balance_failed; /* initialise to 0 */u64 last_update;#ifdef CONFIG_SCHEDSTATS/* load_balance() stats */unsigned int lb_count[CPU_MAX_IDLE_TYPES];unsigned int lb_failed[CPU_MAX_IDLE_TYPES];unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];unsigned int lb_gained[CPU_MAX_IDLE_TYPES];unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];/* Active load balancing */unsigned int alb_count;unsigned int alb_failed;unsigned int alb_pushed;/* SD_BALANCE_EXEC stats */unsigned int sbe_count;unsigned int sbe_balanced;unsigned int sbe_pushed;/* SD_BALANCE_FORK stats */unsigned int sbf_count;unsigned int sbf_balanced;unsigned int sbf_pushed;/* try_to_wake_up() stats */unsigned int ttwu_wake_remote;unsigned int ttwu_move_affine;unsigned int ttwu_move_balance;#endif#ifdef CONFIG_SCHED_DEBUGchar *name;#endifunion {void *private;/* used during construction */struct rcu_head rcu;/* used during destruction */};unsigned int span_weight;/* * Span of all CPUs in this domain. * * NOTE: this field is variable length. (Allocated dynamically * by attaching extra space to the end of the structure, * depending on how many CPUs the kernel has booted up with) */unsigned long span[0];};static inline struct cpumask *sched_domain_span(struct sched_domain *sd){return to_cpumask(sd->span);}extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],    struct sched_domain_attr *dattr_new);/* Allocate an array of sched domains, for partition_sched_domains(). */cpumask_var_t *alloc_sched_domains(unsigned int ndoms);void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);bool cpus_share_cache(int this_cpu, int that_cpu);#else /* CONFIG_SMP */struct sched_domain_attr;static inline voidpartition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],struct sched_domain_attr *dattr_new){}static inline bool cpus_share_cache(int this_cpu, int that_cpu){return true;}#endif/* !CONFIG_SMP */struct io_context;/* See blkdev.h */#ifdef ARCH_HAS_PREFETCH_SWITCH_STACKextern void prefetch_stack(struct task_struct *t);#elsestatic inline void prefetch_stack(struct task_struct *t) { }#endifstruct audit_context;/* See audit.c */struct mempolicy;struct pipe_inode_info;struct uts_namespace;struct load_weight {unsigned long weight, inv_weight;};struct sched_avg {/* * These sums represent an infinite geometric series and so are bound * above by 1024/(1-y).  Thus we only need a u32 to store them for for all * choices of y < 1-2^(-32)*1024. 
*/u32 runnable_avg_sum, runnable_avg_period;u64 last_runnable_update;s64 decay_count;unsigned long load_avg_contrib;};#ifdef CONFIG_SCHEDSTATSstruct sched_statistics {u64wait_start;u64wait_max;u64wait_count;u64wait_sum;u64iowait_count;u64iowait_sum;u64sleep_start;u64sleep_max;s64sum_sleep_runtime;u64block_start;u64block_max;u64exec_max;u64slice_max;u64nr_migrations_cold;u64nr_failed_migrations_affine;u64nr_failed_migrations_running;u64nr_failed_migrations_hot;u64nr_forced_migrations;u64nr_wakeups;u64nr_wakeups_sync;u64nr_wakeups_migrate;u64nr_wakeups_local;u64nr_wakeups_remote;u64nr_wakeups_affine;u64nr_wakeups_affine_attempts;u64nr_wakeups_passive;u64nr_wakeups_idle;};#endifstruct sched_entity {struct load_weightload;/* for load-balancing */struct rb_noderun_node;struct list_headgroup_node;unsigned inton_rq;u64exec_start;u64sum_exec_runtime;u64vruntime;u64prev_sum_exec_runtime;u64nr_migrations;#ifdef CONFIG_SCHEDSTATSstruct sched_statistics statistics;#endif#ifdef CONFIG_FAIR_GROUP_SCHEDstruct sched_entity*parent;/* rq on which this entity is (to be) queued: */struct cfs_rq*cfs_rq;/* rq "owned" by this entity/group: */struct cfs_rq*my_q;#endif/* * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be * removed when useful for applications beyond shares distribution (e.g. * load-balance). */#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)/* Per-entity load-tracking */struct sched_avgavg;#endif};struct sched_rt_entity {struct list_head run_list;unsigned long timeout;unsigned long watchdog_stamp;unsigned int time_slice;struct sched_rt_entity *back;#ifdef CONFIG_RT_GROUP_SCHEDstruct sched_rt_entity*parent;/* rq on which this entity is (to be) queued: */struct rt_rq*rt_rq;/* rq "owned" by this entity/group: */struct rt_rq*my_q;#endif};struct rcu_node;enum perf_event_task_context {perf_invalid_context = -1,perf_hw_context = 0,perf_sw_context,perf_nr_task_contexts,};struct task_struct {volatile long state;/* -1 unrunnable, 0 runnable, >0 stopped */void *stack;atomic_t usage;unsigned int flags;/* per process flags, defined below */unsigned int ptrace;#ifdef CONFIG_SMPstruct llist_node wake_entry;int on_cpu;#endifint on_rq;int prio, static_prio, normal_prio;unsigned int rt_priority;const struct sched_class *sched_class;struct sched_entity se;struct sched_rt_entity rt;#ifdef CONFIG_CGROUP_SCHEDstruct task_group *sched_task_group;#endif#ifdef CONFIG_PREEMPT_NOTIFIERS/* list of struct preempt_notifier: */struct hlist_head preempt_notifiers;#endif/* * fpu_counter contains the number of consecutive context switches * that the FPU is used. If this is over a threshold, the lazy fpu * saving becomes unlazy to save the trap. 
This is an unsigned char * so that after 256 times the counter wraps and the behavior turns * lazy again; this to deal with bursty apps that only use FPU for * a short time */unsigned char fpu_counter;#ifdef CONFIG_BLK_DEV_IO_TRACEunsigned int btrace_seq;#endifunsigned int policy;int nr_cpus_allowed;cpumask_t cpus_allowed;#ifdef CONFIG_PREEMPT_RCUint rcu_read_lock_nesting;char rcu_read_unlock_special;struct list_head rcu_node_entry;#endif /* #ifdef CONFIG_PREEMPT_RCU */#ifdef CONFIG_TREE_PREEMPT_RCUstruct rcu_node *rcu_blocked_node;#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */#ifdef CONFIG_RCU_BOOSTstruct rt_mutex *rcu_boost_mutex;#endif /* #ifdef CONFIG_RCU_BOOST */#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)struct sched_info sched_info;#endifstruct list_head tasks;#ifdef CONFIG_SMPstruct plist_node pushable_tasks;#endifstruct mm_struct *mm, *active_mm;#ifdef CONFIG_COMPAT_BRKunsigned brk_randomized:1;#endif#if defined(SPLIT_RSS_COUNTING)struct task_rss_statrss_stat;#endif/* task state */int exit_state;int exit_code, exit_signal;int pdeath_signal;  /*  The signal sent when the parent dies  */unsigned int jobctl;/* JOBCTL_*, siglock protected *//* Used for emulating ABI behavior of previous Linux versions */unsigned int personality;unsigned did_exec:1;unsigned in_execve:1;/* Tell the LSMs that the process is doing an * execve */unsigned in_iowait:1;/* task may not gain privileges */unsigned no_new_privs:1;/* Revert to default priority/policy when forking */unsigned sched_reset_on_fork:1;unsigned sched_contributes_to_load:1;pid_t pid;pid_t tgid;#ifdef CONFIG_CC_STACKPROTECTOR/* Canary value for the -fstack-protector gcc feature */unsigned long stack_canary;#endif/* * pointers to (original) parent process, youngest child, younger sibling, * older sibling, respectively.  (p->father can be replaced with * p->real_parent->pid) */struct task_struct __rcu *real_parent; /* real parent process */struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports *//* * children/sibling forms the list of my natural children */struct list_head children;/* list of my children */struct list_head sibling;/* linkage in my parent's children list */struct task_struct *group_leader;/* threadgroup leader *//* * ptraced is the list of tasks this task is using ptrace on. * This includes both natural children and PTRACE_ATTACH targets. * p->ptrace_entry is p's link on the p->parent->ptraced list. */struct list_head ptraced;struct list_head ptrace_entry;/* PID/PID hash table linkage. 
*/struct pid_link pids[PIDTYPE_MAX];struct list_head thread_group;struct completion *vfork_done;/* for vfork() */int __user *set_child_tid;/* CLONE_CHILD_SETTID */int __user *clear_child_tid;/* CLONE_CHILD_CLEARTID */cputime_t utime, stime, utimescaled, stimescaled;cputime_t gtime;#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVEstruct cputime prev_cputime;#endif#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GENseqlock_t vtime_seqlock;unsigned long long vtime_snap;enum {VTIME_SLEEPING = 0,VTIME_USER,VTIME_SYS,} vtime_snap_whence;#endifunsigned long nvcsw, nivcsw; /* context switch counts */struct timespec start_time; /* monotonic time */struct timespec real_start_time;/* boot based time *//* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */unsigned long min_flt, maj_flt;struct task_cputime cputime_expires;struct list_head cpu_timers[3];/* process credentials */const struct cred __rcu *real_cred; /* objective and real subjective task * credentials (COW) */const struct cred __rcu *cred;/* effective (overridable) subjective task * credentials (COW) */char comm[TASK_COMM_LEN]; /* executable name excluding path     - access with [gs]et_task_comm (which lock       it with task_lock())     - initialized normally by setup_new_exec *//* file system info */int link_count, total_link_count;#ifdef CONFIG_SYSVIPC/* ipc stuff */struct sysv_sem sysvsem;#endif#ifdef CONFIG_DETECT_HUNG_TASK/* hung task detection */unsigned long last_switch_count;#endif/* CPU-specific state of this task */struct thread_struct thread;/* filesystem information */struct fs_struct *fs;/* open file information */struct files_struct *files;/* namespaces */struct nsproxy *nsproxy;/* signal handlers */struct signal_struct *signal;struct sighand_struct *sighand;sigset_t blocked, real_blocked;sigset_t saved_sigmask;/* restored if set_restore_sigmask() was used */struct sigpending pending;unsigned long sas_ss_sp;size_t sas_ss_size;int (*notifier)(void *priv);void *notifier_data;sigset_t *notifier_mask;struct callback_head *task_works;struct audit_context *audit_context;#ifdef CONFIG_AUDITSYSCALLkuid_t loginuid;unsigned int sessionid;#endifstruct seccomp seccomp;/* Thread group tracking */   u32 parent_exec_id;   u32 self_exec_id;/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, * mempolicy */spinlock_t alloc_lock;/* Protection of the PI data structures: */raw_spinlock_t pi_lock;#ifdef CONFIG_RT_MUTEXES/* PI waiters blocked on a rt_mutex held by this task */struct plist_head pi_waiters;/* Deadlock detection and priority inheritance handling */struct rt_mutex_waiter *pi_blocked_on;#endif#ifdef CONFIG_DEBUG_MUTEXES/* mutex deadlock detection */struct mutex_waiter *blocked_on;#endif#ifdef CONFIG_TRACE_IRQFLAGSunsigned int irq_events;unsigned long hardirq_enable_ip;unsigned long hardirq_disable_ip;unsigned int hardirq_enable_event;unsigned int hardirq_disable_event;int hardirqs_enabled;int hardirq_context;unsigned long softirq_disable_ip;unsigned long softirq_enable_ip;unsigned int softirq_disable_event;unsigned int softirq_enable_event;int softirqs_enabled;int softirq_context;#endif#ifdef CONFIG_LOCKDEP# define MAX_LOCK_DEPTH 48ULu64 curr_chain_key;int lockdep_depth;unsigned int lockdep_recursion;struct held_lock held_locks[MAX_LOCK_DEPTH];gfp_t lockdep_reclaim_gfp;#endif/* journalling filesystem info */void *journal_info;/* stacked block device info */struct bio_list *bio_list;#ifdef CONFIG_BLOCK/* stack plugging */struct blk_plug *plug;#endif/* VM state */struct reclaim_state 
*reclaim_state;struct backing_dev_info *backing_dev_info;struct io_context *io_context;unsigned long ptrace_message;siginfo_t *last_siginfo; /* For ptrace use.  */struct task_io_accounting ioac;#if defined(CONFIG_TASK_XACCT)u64 acct_rss_mem1;/* accumulated rss usage */u64 acct_vm_mem1;/* accumulated virtual memory usage */cputime_t acct_timexpd;/* stime + utime since last update */#endif#ifdef CONFIG_CPUSETSnodemask_t mems_allowed;/* Protected by alloc_lock */seqcount_t mems_allowed_seq;/* Seqence no to catch updates */int cpuset_mem_spread_rotor;int cpuset_slab_spread_rotor;#endif#ifdef CONFIG_CGROUPS/* Control Group info protected by css_set_lock */struct css_set __rcu *cgroups;/* cg_list protected by css_set_lock and tsk->alloc_lock */struct list_head cg_list;#endif#ifdef CONFIG_FUTEXstruct robust_list_head __user *robust_list;#ifdef CONFIG_COMPATstruct compat_robust_list_head __user *compat_robust_list;#endifstruct list_head pi_state_list;struct futex_pi_state *pi_state_cache;#endif#ifdef CONFIG_PERF_EVENTSstruct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];struct mutex perf_event_mutex;struct list_head perf_event_list;#endif#ifdef CONFIG_NUMAstruct mempolicy *mempolicy;/* Protected by alloc_lock */short il_next;short pref_node_fork;#endif#ifdef CONFIG_NUMA_BALANCINGint numa_scan_seq;int numa_migrate_seq;unsigned int numa_scan_period;u64 node_stamp;/* migration stamp  */struct callback_head numa_work;#endif /* CONFIG_NUMA_BALANCING */struct rcu_head rcu;/* * cache last used pipe for splice */struct pipe_inode_info *splice_pipe;struct page_frag task_frag;#ifdefCONFIG_TASK_DELAY_ACCTstruct task_delay_info *delays;#endif#ifdef CONFIG_FAULT_INJECTIONint make_it_fail;#endif/* * when (nr_dirtied >= nr_dirtied_pause), it's time to call * balance_dirty_pages() for some dirty throttling pause */int nr_dirtied;int nr_dirtied_pause;unsigned long dirty_paused_when; /* start of a write-and-pause period */#ifdef CONFIG_LATENCYTOPint latency_record_count;struct latency_record latency_record[LT_SAVECOUNT];#endif/* * time slack values; these are used to round up poll() and * select() etc timeout values. These are in nanoseconds. */unsigned long timer_slack_ns;unsigned long default_timer_slack_ns;#ifdef CONFIG_FUNCTION_GRAPH_TRACER/* Index of current stored address in ret_stack */int curr_ret_stack;/* Stack of return addresses for return function tracing */struct ftrace_ret_stack*ret_stack;/* time stamp for last schedule */unsigned long long ftrace_timestamp;/* * Number of functions that haven't been traced * because of depth overrun. */atomic_t trace_overrun;/* Pause for the tracing */atomic_t tracing_graph_pause;#endif#ifdef CONFIG_TRACING/* state flags for use by tracers */unsigned long trace;/* bitmask and counter of trace recursion */unsigned long trace_recursion;#endif /* CONFIG_TRACING */#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */struct memcg_batch_info {int do_batch;/* incremented when batch uncharge started */struct mem_cgroup *memcg; /* target memcg of uncharge */unsigned long nr_pages;/* uncharged usage */unsigned long memsw_nr_pages; /* uncharged mem+swap usage */} memcg_batch;unsigned int memcg_kmem_skip_account;#endif#ifdef CONFIG_HAVE_HW_BREAKPOINTatomic_t ptrace_bp_refcnt;#endif#ifdef CONFIG_UPROBESstruct uprobe_task *utask;#endif#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)unsigned intsequential_io;unsigned intsequential_io_avg;#endif};/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)#ifdef CONFIG_NUMA_BALANCINGextern void task_numa_fault(int node, int pages, bool migrated);extern void set_numabalancing_state(bool enabled);#elsestatic inline void task_numa_fault(int node, int pages, bool migrated){}static inline void set_numabalancing_state(bool enabled){}#endifstatic inline struct pid *task_pid(struct task_struct *task){return task->pids[PIDTYPE_PID].pid;}static inline struct pid *task_tgid(struct task_struct *task){return task->group_leader->pids[PIDTYPE_PID].pid;}/* * Without tasklist or rcu lock it is not safe to dereference * the result of task_pgrp/task_session even if task == current, * we can race with another thread doing sys_setsid/sys_setpgid. */static inline struct pid *task_pgrp(struct task_struct *task){return task->group_leader->pids[PIDTYPE_PGID].pid;}static inline struct pid *task_session(struct task_struct *task){return task->group_leader->pids[PIDTYPE_SID].pid;}struct pid_namespace;/* * the helpers to get the task's different pids as they are seen * from various namespaces * * task_xid_nr()     : global id, i.e. the id seen from the init namespace; * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of *                     current. * task_xid_nr_ns()  : id seen from the ns specified; * * set_task_vxid()   : assigns a virtual id to a task; * * see also pid_nr() etc in include/linux/pid.h */pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,struct pid_namespace *ns);static inline pid_t task_pid_nr(struct task_struct *tsk){return tsk->pid;}static inline pid_t task_pid_nr_ns(struct task_struct *tsk,struct pid_namespace *ns){return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);}static inline pid_t task_pid_vnr(struct task_struct *tsk){return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);}static inline pid_t task_tgid_nr(struct task_struct *tsk){return tsk->tgid;}pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);static inline pid_t task_tgid_vnr(struct task_struct *tsk){return pid_vnr(task_tgid(tsk));}static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,struct pid_namespace *ns){return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);}static inline pid_t task_pgrp_vnr(struct task_struct *tsk){return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);}static inline pid_t task_session_nr_ns(struct task_struct *tsk,struct pid_namespace *ns){return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);}static inline pid_t task_session_vnr(struct task_struct *tsk){return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);}/* obsolete, do not use */static inline pid_t task_pgrp_nr(struct task_struct *tsk){return task_pgrp_nr_ns(tsk, &init_pid_ns);}/** * pid_alive - check that a task structure is not stale * @p: Task structure to be checked. * * Test if a process is not yet dead (at most zombie state) * If pid_alive fails, then pointers within the task structure * can be stale and must not be dereferenced. */static inline int pid_alive(struct task_struct *p){return p->pids[PIDTYPE_PID].pid != NULL;}/** * is_global_init - check if a task structure is init * @tsk: Task structure to be checked. * * Check if a task structure is the first user space task the kernel created. 
*/static inline int is_global_init(struct task_struct *tsk){return tsk->pid == 1;}extern struct pid *cad_pid;extern void free_task(struct task_struct *tsk);#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)extern void __put_task_struct(struct task_struct *t);static inline void put_task_struct(struct task_struct *t){if (atomic_dec_and_test(&t->usage))__put_task_struct(t);}#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GENextern void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime);extern void task_cputime_scaled(struct task_struct *t,cputime_t *utimescaled, cputime_t *stimescaled);extern cputime_t task_gtime(struct task_struct *t);#elsestatic inline void task_cputime(struct task_struct *t,cputime_t *utime, cputime_t *stime){if (utime)*utime = t->utime;if (stime)*stime = t->stime;}static inline void task_cputime_scaled(struct task_struct *t,       cputime_t *utimescaled,       cputime_t *stimescaled){if (utimescaled)*utimescaled = t->utimescaled;if (stimescaled)*stimescaled = t->stimescaled;}static inline cputime_t task_gtime(struct task_struct *t){return t->gtime;}#endifextern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);/* * Per process flags */#define PF_EXITING0x00000004/* getting shut down */#define PF_EXITPIDONE0x00000008/* pi exit done on shut down */#define PF_VCPU0x00000010/* I'm a virtual CPU */#define PF_WQ_WORKER0x00000020/* I'm a workqueue worker */#define PF_FORKNOEXEC0x00000040/* forked but didn't exec */#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */#define PF_SUPERPRIV0x00000100/* used super-user privileges */#define PF_DUMPCORE0x00000200/* dumped core */#define PF_SIGNALED0x00000400/* killed by a signal */#define PF_MEMALLOC0x00000800/* Allocating memory */#define PF_NPROC_EXCEEDED 0x00001000/* set_user noticed that RLIMIT_NPROC was exceeded */#define PF_USED_MATH0x00002000/* if unset the fpu must be initialized before use */#define PF_USED_ASYNC0x00004000/* used async_schedule*(), used by module init */#define PF_NOFREEZE0x00008000/* this thread should not be frozen */#define PF_FROZEN0x00010000/* frozen for system suspend */#define PF_FSTRANS0x00020000/* inside a filesystem transaction */#define PF_KSWAPD0x00040000/* I am kswapd */#define PF_MEMALLOC_NOIO 0x00080000/* Allocating memory without IO involved */#define PF_LESS_THROTTLE 0x00100000/* Throttle me less: I clean memory */#define PF_KTHREAD0x00200000/* I am a kernel thread */#define PF_RANDOMIZE0x00400000/* randomize virtual address space */#define PF_SWAPWRITE0x00800000/* Allowed to write to swap */#define PF_SPREAD_PAGE0x01000000/* Spread page cache over cpuset */#define PF_SPREAD_SLAB0x02000000/* Spread some slab caches over cpuset */#define PF_NO_SETAFFINITY 0x04000000/* Userland is not allowed to meddle with cpus_allowed */#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */#define PF_MEMPOLICY0x10000000/* Non-default NUMA mempolicy */#define PF_MUTEX_TESTER0x20000000/* Thread belongs to the rt mutex tester */#define PF_FREEZER_SKIP0x40000000/* Freezer should not count it as freezable *//* * Only the _current_ task can read/write to tsk->flags, but other * tasks can access tsk->flags in readonly mode for example * with tsk_used_math (like during threaded core dumping). 
* There is however an exception to this rule during ptrace * or during fork: the ptracer task is allowed to write to the * child->flags of its traced child (same goes for fork, the parent * can write to the child->flags), because we're guaranteed the * child is not running and in turn not changing child->flags * at the same time the parent does it. */#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)#define clear_used_math() clear_stopped_child_used_math(current)#define set_used_math() set_stopped_child_used_math(current)#define conditional_stopped_child_used_math(condition, child) \do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)#define conditional_used_math(condition) \conditional_stopped_child_used_math(condition, current)#define copy_to_stopped_child_used_math(child) \do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)#define used_math() tsk_used_math(current)/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */static inline gfp_t memalloc_noio_flags(gfp_t flags){if (unlikely(current->flags & PF_MEMALLOC_NOIO))flags &= ~__GFP_IO;return flags;}static inline unsigned int memalloc_noio_save(void){unsigned int flags = current->flags & PF_MEMALLOC_NOIO;current->flags |= PF_MEMALLOC_NOIO;return flags;}static inline void memalloc_noio_restore(unsigned int flags){current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;}/* * task->jobctl flags */#define JOBCTL_STOP_SIGMASK0xffff/* signr of the last group stop */#define JOBCTL_STOP_DEQUEUED_BIT 16/* stop signal dequeued */#define JOBCTL_STOP_PENDING_BIT17/* task should stop for group stop */#define JOBCTL_STOP_CONSUME_BIT18/* consume group stop count */#define JOBCTL_TRAP_STOP_BIT19/* trap for STOP */#define JOBCTL_TRAP_NOTIFY_BIT20/* trap for NOTIFY */#define JOBCTL_TRAPPING_BIT21/* switching to TRACED */#define JOBCTL_LISTENING_BIT22/* ptracer is listening for events */#define JOBCTL_STOP_DEQUEUED(1 << JOBCTL_STOP_DEQUEUED_BIT)#define JOBCTL_STOP_PENDING(1 << JOBCTL_STOP_PENDING_BIT)#define JOBCTL_STOP_CONSUME(1 << JOBCTL_STOP_CONSUME_BIT)#define JOBCTL_TRAP_STOP(1 << JOBCTL_TRAP_STOP_BIT)#define JOBCTL_TRAP_NOTIFY(1 << JOBCTL_TRAP_NOTIFY_BIT)#define JOBCTL_TRAPPING(1 << JOBCTL_TRAPPING_BIT)#define JOBCTL_LISTENING(1 << JOBCTL_LISTENING_BIT)#define JOBCTL_TRAP_MASK(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)#define JOBCTL_PENDING_MASK(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)extern bool task_set_jobctl_pending(struct task_struct *task,    unsigned int mask);extern void task_clear_jobctl_trapping(struct task_struct *task);extern void task_clear_jobctl_pending(struct task_struct *task,      unsigned int mask);#ifdef CONFIG_PREEMPT_RCU#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. 
*/static inline void rcu_copy_process(struct task_struct *p){p->rcu_read_lock_nesting = 0;p->rcu_read_unlock_special = 0;#ifdef CONFIG_TREE_PREEMPT_RCUp->rcu_blocked_node = NULL;#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */#ifdef CONFIG_RCU_BOOSTp->rcu_boost_mutex = NULL;#endif /* #ifdef CONFIG_RCU_BOOST */INIT_LIST_HEAD(&p->rcu_node_entry);}#elsestatic inline void rcu_copy_process(struct task_struct *p){}#endifstatic inline void tsk_restore_flags(struct task_struct *task,unsigned long orig_flags, unsigned long flags){task->flags &= ~flags;task->flags |= orig_flags & flags;}#ifdef CONFIG_SMPextern void do_set_cpus_allowed(struct task_struct *p,       const struct cpumask *new_mask);extern int set_cpus_allowed_ptr(struct task_struct *p,const struct cpumask *new_mask);#elsestatic inline void do_set_cpus_allowed(struct task_struct *p,      const struct cpumask *new_mask){}static inline int set_cpus_allowed_ptr(struct task_struct *p,       const struct cpumask *new_mask){if (!cpumask_test_cpu(0, new_mask))return -EINVAL;return 0;}#endif#ifdef CONFIG_NO_HZ_COMMONvoid calc_load_enter_idle(void);void calc_load_exit_idle(void);#elsestatic inline void calc_load_enter_idle(void) { }static inline void calc_load_exit_idle(void) { }#endif /* CONFIG_NO_HZ_COMMON */#ifndef CONFIG_CPUMASK_OFFSTACKstatic inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask){return set_cpus_allowed_ptr(p, &new_mask);}#endif/* * Do not use outside of architecture code which knows its limitations. * * sched_clock() has no promise of monotonicity or bounded drift between * CPUs, use (which you should not) requires disabling IRQs. * * Please use one of the three interfaces below. */extern unsigned long long notrace sched_clock(void);/* * See the comment in kernel/sched/clock.c */extern u64 cpu_clock(int cpu);extern u64 local_clock(void);extern u64 sched_clock_cpu(int cpu);extern void sched_clock_init(void);#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCKstatic inline void sched_clock_tick(void){}static inline void sched_clock_idle_sleep_event(void){}static inline void sched_clock_idle_wakeup_event(u64 delta_ns){}#else/* * Architectures can set this to 1 if they have specified * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, * but then during bootup it turns out that sched_clock() * is reliable after all: */extern int sched_clock_stable;extern void sched_clock_tick(void);extern void sched_clock_idle_sleep_event(void);extern void sched_clock_idle_wakeup_event(u64 delta_ns);#endif#ifdef CONFIG_IRQ_TIME_ACCOUNTING/* * An i/f to runtime opt-in for irq time accounting based off of sched_clock. * The reason for this explicit opt-in is not to have perf penalty with * slow sched_clocks. 
*/extern void enable_sched_clock_irqtime(void);extern void disable_sched_clock_irqtime(void);#elsestatic inline void enable_sched_clock_irqtime(void) {}static inline void disable_sched_clock_irqtime(void) {}#endifextern unsigned long longtask_sched_runtime(struct task_struct *task);/* sched_exec is called by processes performing an exec */#ifdef CONFIG_SMPextern void sched_exec(void);#else#define sched_exec()   {}#endifextern void sched_clock_idle_sleep_event(void);extern void sched_clock_idle_wakeup_event(u64 delta_ns);#ifdef CONFIG_HOTPLUG_CPUextern void idle_task_exit(void);#elsestatic inline void idle_task_exit(void) {}#endif#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)extern void wake_up_nohz_cpu(int cpu);#elsestatic inline void wake_up_nohz_cpu(int cpu) { }#endif#ifdef CONFIG_NO_HZ_FULLextern bool sched_can_stop_tick(void);extern u64 scheduler_tick_max_deferment(void);#elsestatic inline bool sched_can_stop_tick(void) { return false; }#endif#ifdef CONFIG_SCHED_AUTOGROUPextern void sched_autogroup_create_attach(struct task_struct *p);extern void sched_autogroup_detach(struct task_struct *p);extern void sched_autogroup_fork(struct signal_struct *sig);extern void sched_autogroup_exit(struct signal_struct *sig);#ifdef CONFIG_PROC_FSextern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);#endif#elsestatic inline void sched_autogroup_create_attach(struct task_struct *p) { }static inline void sched_autogroup_detach(struct task_struct *p) { }static inline void sched_autogroup_fork(struct signal_struct *sig) { }static inline void sched_autogroup_exit(struct signal_struct *sig) { }#endifextern bool yield_to(struct task_struct *p, bool preempt);extern void set_user_nice(struct task_struct *p, long nice);extern int task_prio(const struct task_struct *p);extern int task_nice(const struct task_struct *p);extern int can_nice(const struct task_struct *p, const int nice);extern int task_curr(const struct task_struct *p);extern int idle_cpu(int cpu);extern int sched_setscheduler(struct task_struct *, int,      const struct sched_param *);extern int sched_setscheduler_nocheck(struct task_struct *, int,      const struct sched_param *);extern struct task_struct *idle_task(int cpu);/** * is_idle_task - is the specified task an idle task? * @p: the task in question. */static inline bool is_idle_task(const struct task_struct *p){return p->pid == 0;}extern struct task_struct *curr_task(int cpu);extern void set_curr_task(int cpu, struct task_struct *p);void yield(void);/* * The default (Linux) execution domain. 
*/extern struct exec_domaindefault_exec_domain;union thread_union {struct thread_info thread_info;unsigned long stack[THREAD_SIZE/sizeof(long)];};#ifndef __HAVE_ARCH_KSTACK_ENDstatic inline int kstack_end(void *addr){/* Reliable end of stack detection: * Some APM bios versions misalign the stack */return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));}#endifextern union thread_union init_thread_union;extern struct task_struct init_task;extern struct   mm_struct init_mm;extern struct pid_namespace init_pid_ns;/* * find a task by one of its numerical ids * * find_task_by_pid_ns(): *      finds a task by its pid in the specified namespace * find_task_by_vpid(): *      finds a task by its virtual pid * * see also find_vpid() etc in include/linux/pid.h */extern struct task_struct *find_task_by_vpid(pid_t nr);extern struct task_struct *find_task_by_pid_ns(pid_t nr,struct pid_namespace *ns);extern void __set_special_pids(struct pid *pid);/* per-UID process charging. */extern struct user_struct * alloc_uid(kuid_t);static inline struct user_struct *get_uid(struct user_struct *u){atomic_inc(&u->__count);return u;}extern void free_uid(struct user_struct *);#include <asm/current.h>extern void xtime_update(unsigned long ticks);extern int wake_up_state(struct task_struct *tsk, unsigned int state);extern int wake_up_process(struct task_struct *tsk);extern void wake_up_new_task(struct task_struct *tsk);#ifdef CONFIG_SMP extern void kick_process(struct task_struct *tsk);#else static inline void kick_process(struct task_struct *tsk) { }#endifextern void sched_fork(struct task_struct *p);extern void sched_dead(struct task_struct *p);extern void proc_caches_init(void);extern void flush_signals(struct task_struct *);extern void __flush_signals(struct task_struct *);extern void ignore_signals(struct task_struct *);extern void flush_signal_handlers(struct task_struct *, int force_default);extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info){unsigned long flags;int ret;spin_lock_irqsave(&tsk->sighand->siglock, flags);ret = dequeue_signal(tsk, mask, info);spin_unlock_irqrestore(&tsk->sighand->siglock, flags);return ret;}extern void block_all_signals(int (*notifier)(void *priv), void *priv,      sigset_t *mask);extern void unblock_all_signals(void);extern void release_task(struct task_struct * p);extern int send_sig_info(int, struct siginfo *, struct task_struct *);extern int force_sigsegv(int, struct task_struct *);extern int force_sig_info(int, struct siginfo *, struct task_struct *);extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,const struct cred *, u32);extern int kill_pgrp(struct pid *pid, int sig, int priv);extern int kill_pid(struct pid *pid, int sig, int priv);extern int kill_proc_info(int, struct siginfo *, pid_t);extern __must_check bool do_notify_parent(struct task_struct *, int);extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);extern void force_sig(int, struct task_struct *);extern int send_sig(int, struct task_struct *, int);extern int zap_other_threads(struct task_struct *p);extern struct sigqueue *sigqueue_alloc(void);extern void sigqueue_free(struct sigqueue *);extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);extern int 
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct * mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
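/*
 * Illustrative example -- not part of sched.h.  The usual pairing of
 * get_task_mm() and mmput() described above: take a reference on a task's mm
 * only if the task is not already exiting, use it, then drop the reference.
 * The helper name is invented for the example.
 */
#include <linux/sched.h>
#include <linux/mm_types.h>

static unsigned long example_total_vm(struct task_struct *task)
{
	struct mm_struct *mm;
	unsigned long pages = 0;

	mm = get_task_mm(task);	/* NULL for kernel threads or exiting tasks */
	if (mm) {
		pages = mm->total_vm;	/* mapped pages of the address space */
		mmput(mm);
	}
	return pages;
}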
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
extern struct mm_struct *dup_mm(struct task_struct *tsk);

extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern void do_group_exit(int);

extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(const char *,
		     const char __user * const __user *,
		     const char __user * const __user *);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/* Do to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline int has_group_leader_pid(struct task_struct *p)
{
	return p->pid == p->tgid;
}

static inline
int same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->tgid == p2->tgid;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))
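/*
 * Illustrative example -- not part of sched.h.  As the comment above the
 * do_each_thread()/while_each_thread() macros warns, they expand to a double
 * loop, so "break" only leaves the inner loop; a goto is needed to stop the
 * whole walk.  The walk is done under read_lock(&tasklist_lock) to keep the
 * task lists stable, and the found task is pinned with get_task_struct().
 * The helper name is invented for the example.
 */
#include <linux/sched.h>
#include <linux/string.h>

static struct task_struct *example_find_thread_by_comm(const char *name)
{
	struct task_struct *g, *t, *found = NULL;

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (strcmp(t->comm, name) == 0) {
			get_task_struct(t);	/* drop later with put_task_struct() */
			found = t;
			goto out;		/* "break" would not leave both loops */
		}
	} while_each_thread(g, t);
out:
	read_unlock(&tasklist_lock);
	return found;
}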
/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}

#ifdef CONFIG_CGROUPS
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	down_read(&tsk->signal->group_rwsem);
}
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	up_read(&tsk->signal->group_rwsem);
}

/**
 * threadgroup_lock - lock threadgroup
 * @tsk: member task of the threadgroup to lock
 *
 * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
 * change ->group_leader/pid.  This is useful for cases where the threadgroup
 * needs to stay stable across blockable operations.
 *
 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
 * synchronization.  While held, no new task will be added to threadgroup
 * and no existing live task will have its PF_EXITING set.
 *
 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
 * sub-thread becomes a new leader.
 */
static inline void threadgroup_lock(struct task_struct *tsk)
{
	down_write(&tsk->signal->group_rwsem);
}

/**
 * threadgroup_unlock - unlock threadgroup
 * @tsk: member task of the threadgroup to unlock
 *
 * Reverse threadgroup_lock().
 */
static inline void threadgroup_unlock(struct task_struct *tsk)
{
	up_write(&tsk->signal->group_rwsem);
}
#else
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
static inline void threadgroup_change_end(struct task_struct *tsk) {}
static inline void threadgroup_lock(struct task_struct *tsk) {}
static inline void threadgroup_unlock(struct task_struct *tsk) {}
#endif

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}
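/*
 * Illustrative example -- not part of sched.h.  lock_task_sighand() above can
 * fail when the task's ->sighand is already gone (the task is dead), so the
 * usual pattern checks the return value before touching signal state.  The
 * helper name is invented for the example.
 */
#include <linux/sched.h>

static int example_has_pending(struct task_struct *tsk)
{
	unsigned long flags;
	int pending = 0;

	if (lock_task_sighand(tsk, &flags)) {
		/* siglock held, interrupts disabled */
		pending = !sigisemptyset(&tsk->pending.signal);
		unlock_task_sighand(tsk, &flags);
	}
	return pending;
}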
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int _cond_resched(void);

#define cond_resched() ({			\
	__might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET	0
#endif

#define cond_resched_lock(lock) ({				\
	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}
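/*
 * Illustrative example -- not part of sched.h.  A long-running kernel loop
 * typically calls cond_resched() (see the comment above) to keep scheduling
 * latency down, and bails out early when fatal_signal_pending() reports that
 * the calling task has been sent SIGKILL.  The callback and item count are
 * invented for the example.
 */
#include <linux/sched.h>

static int example_process_items(void (*do_item)(unsigned long), unsigned long n)
{
	unsigned long i;

	for (i = 0; i < n; i++) {
		if (fatal_signal_pending(current))
			return -EINTR;	/* caller is being killed */
		do_item(i);
		cond_resched();		/* may schedule if a resched is pending */
	}
	return 0;
}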
/*
 * Idle thread specific functions to determine the need_resched
 * polling state. We have two versions, one based on TS_POLLING in
 * thread_info.status and one based on TIF_POLLING_NRFLAG in
 * thread_info.flags
 */
#ifdef TS_POLLING
static inline int tsk_is_polling(struct task_struct *p)
{
	return task_thread_info(p)->status & TS_POLLING;
}
static inline void current_set_polling(void)
{
	current_thread_info()->status |= TS_POLLING;
}

static inline void current_clr_polling(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	smp_mb__after_clear_bit();
}
#elif defined(TIF_POLLING_NRFLAG)
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}
static inline void current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline void current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}
#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void current_set_polling(void) { }
static inline void current_clr_polling(void) { }
#endif

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	raw_spin_lock_init(&sig->cputimer.lock);
}

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}
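/*
 * Illustrative example -- not part of sched.h.  thread_group_cputime(),
 * declared above, sums the CPU time of every thread in @tsk's thread group
 * into a caller-provided struct task_cputime; the sketch below returns only
 * the accumulated execution time.  The helper name is invented for the
 * example.
 */
#include <linux/sched.h>

static u64 example_group_runtime(struct task_struct *tsk)
{
	struct task_cputime times;

	thread_group_cputime(tsk, &times);
	return times.sum_exec_runtime;	/* total nanoseconds used by the group */
}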
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}

static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
#endif /* CONFIG_MM_OWNER */

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}

#endif
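/*
 * Illustrative example -- not part of sched.h.  rlimit() above reads the
 * current task's soft limit; a typical check rejects a request that would
 * exceed RLIMIT_MEMLOCK, in the spirit of the mlock paths.  The byte count
 * is the caller's, and the helper name is invented for the example.
 */
#include <linux/sched.h>
#include <linux/resource.h>

static int example_check_memlock(unsigned long bytes)
{
	unsigned long limit = rlimit(RLIMIT_MEMLOCK);	/* soft limit, in bytes */

	if (limit != RLIM_INFINITY && bytes > limit)
		return -ENOMEM;
	return 0;
}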
