[Linux API] Using kthread


1. Analysis of a kernel example

Let's start by analyzing an example. It creates a kernel thread that prints GPIO information in a loop, and it keeps printing until the thread is told to stop.

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Thread callback: it must return int and take a single void * argument. */
static int config_gpio_thread(void *tmp_gpio_num)
{
    int gpio_num = *((int *)tmp_gpio_num); /* cast the void * back to int * and dereference */

    /* Keep printing as long as kthread_stop() has not been called on this thread;
     * once it is called, kthread_should_stop() starts returning true. */
    while (!kthread_should_stop()) {
        gpio_direction_output(gpio_num, 0);
        printk("the gpio value is: %d\n", __gpio_get_value(gpio_num));
        msleep(500);
        gpio_direction_output(gpio_num, 1);
        printk("the gpio value is: %d\n", __gpio_get_value(gpio_num));
        msleep(500);
    }
    return 0;
}

/* The actual driver code that creates, starts and stops the thread. */
static int handle_gpio_thread(int num)
{
    struct task_struct *gpio_thread; /* the thread's task structure */

    /* The second argument is passed straight to the callback, so it must be a void *. */
    gpio_thread = kthread_create(config_gpio_thread, &num, "gpio_thread");
    if (IS_ERR(gpio_thread))
        return PTR_ERR(gpio_thread);

    wake_up_process(gpio_thread); /* wake the thread up so it starts working */
    msleep(5000);                 /* after 5 s, ask the thread to stop */
    kthread_stop(gpio_thread);    /* after this call, kthread_should_stop() returns true */
    return 0;
}
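For context, a function like handle_gpio_thread() would typically be wired up from module init code. The following is a minimal sketch, not part of the original example; TEST_GPIO is a hypothetical GPIO number that would have to match real hardware.

#include <linux/module.h>
#include <linux/init.h>

#define TEST_GPIO 42   /* hypothetical GPIO number, adjust for real hardware */

static int __init gpio_thread_demo_init(void)
{
    /* Runs the create/print/stop sequence shown above. */
    return handle_gpio_thread(TEST_GPIO);
}

static void __exit gpio_thread_demo_exit(void)
{
}

module_init(gpio_thread_demo_init);
module_exit(gpio_thread_demo_exit);
MODULE_LICENSE("GPL");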

2. APIs related to thread creation

There are two main ways to create a kthread.
The first is the one used in the example above:
kthread_create(threadfn, data, namefmt, arg...) is a macro that returns a struct task_struct * pointer. It creates the thread task: threadfn is the thread's callback function, data is the argument handed to that callback, and namefmt is a printf-style format string for the thread's name.
The callback must return int and take a single void * argument.
wake_up_process(gpio_thread) then starts the thread, because kthread_create() leaves the new thread in a stopped state.
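Because data is handed to the thread as a raw void *, it usually points at something that outlives the caller, such as the driver's private data. Below is a minimal sketch of that pattern; struct my_drv, my_thread_fn and my_drv_start are hypothetical names, not from the original example.

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

/* Hypothetical driver-private data handed to the thread. */
struct my_drv {
    int gpio_num;
    struct task_struct *thread;
};

static int my_thread_fn(void *data)
{
    struct my_drv *drv = data;      /* recover the typed pointer */

    while (!kthread_should_stop())
        msleep(500);                /* do work with drv->gpio_num here */
    return 0;
}

static int my_drv_start(struct my_drv *drv)
{
    drv->thread = kthread_create(my_thread_fn, drv, "my_drv/%d", drv->gpio_num);
    if (IS_ERR(drv->thread))
        return PTR_ERR(drv->thread); /* kthread_create() returns ERR_PTR() on failure */

    wake_up_process(drv->thread);    /* the thread is created stopped; wake it to run */
    return 0;
}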

The second method:

kthread_run(threadfn, data, namefmt, ...) creates the thread and starts it in a single step; a short sketch combining it with the stop APIs follows the list below.

Other thread-related APIs:
int kthread_stop(struct task_struct *k); // asks the thread to stop, waits for it to exit, and returns the thread function's return value
bool kthread_should_stop(void); // called from inside the thread to check whether kthread_stop() has been requested
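Putting the pieces together, a minimal sketch of the kthread_run() variant could look like the following; blink_thread_fn, blink_start and blink_stop are hypothetical names used only for illustration.

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/printk.h>

static struct task_struct *blink_task;

static int blink_thread_fn(void *data)
{
    /* Loop until kthread_stop() is called on this task. */
    while (!kthread_should_stop()) {
        printk("blink thread still running\n");
        msleep(500);
    }
    return 0;   /* this value is what kthread_stop() returns to the caller */
}

static int blink_start(void)
{
    /* kthread_run() = kthread_create() + wake_up_process(). */
    blink_task = kthread_run(blink_thread_fn, NULL, "blink_thread");
    if (IS_ERR(blink_task))
        return PTR_ERR(blink_task);
    return 0;
}

static void blink_stop(void)
{
    int ret = kthread_stop(blink_task);   /* blocks until the thread exits */
    printk("blink thread exited with %d\n", ret);
}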

Reference file:

kernel/include/linux/kthread.h

#ifndef _LINUX_KTHREAD_H
#define _LINUX_KTHREAD_H
/* Simple interface for creating and stopping kernel threads without mess. */
#include <linux/err.h>
#include <linux/sched.h>

__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                       void *data,
                       int node,
                       const char namefmt[], ...);

#define kthread_create(threadfn, data, namefmt, arg...) \
    kthread_create_on_node(threadfn, data, -1, namefmt, ##arg)

struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                      void *data,
                      unsigned int cpu,
                      const char *namefmt);

/**
 * kthread_run - create and wake a thread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: Convenient wrapper for kthread_create() followed by
 * wake_up_process().  Returns the kthread or ERR_PTR(-ENOMEM).
 */
#define kthread_run(threadfn, data, namefmt, ...)              \
({                                     \
    struct task_struct *__k                        \
        = kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
    if (!IS_ERR(__k))                          \
        wake_up_process(__k);                      \
    __k;                                   \
})

void kthread_bind(struct task_struct *k, unsigned int cpu);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_data(struct task_struct *k);
void *probe_kthread_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);

int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
extern int tsk_fork_get_node(struct task_struct *tsk);

/*
 * Simple work processor based on kthread.
 *
 * This provides easier way to make use of kthreads.  A kthread_work
 * can be queued and flushed using queue/flush_kthread_work()
 * respectively.  Queued kthread_works are processed by a kthread
 * running kthread_worker_fn().
 */
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);

struct kthread_worker {
    spinlock_t      lock;
    struct list_head    work_list;
    struct task_struct  *task;
    struct kthread_work *current_work;
};

struct kthread_work {
    struct list_head    node;
    kthread_work_func_t func;
    wait_queue_head_t   done;
    struct kthread_worker   *worker;
};

#define KTHREAD_WORKER_INIT(worker) {               \
    .lock = __SPIN_LOCK_UNLOCKED((worker).lock),            \
    .work_list = LIST_HEAD_INIT((worker).work_list),        \
    }

#define KTHREAD_WORK_INIT(work, fn) {               \
    .node = LIST_HEAD_INIT((work).node),                \
    .func = (fn),                           \
    .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done),     \
    }

#define DEFINE_KTHREAD_WORKER(worker)                   \
    struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)

#define DEFINE_KTHREAD_WORK(work, fn)                   \
    struct kthread_work work = KTHREAD_WORK_INIT(work, fn)

/*
 * kthread_worker.lock and kthread_work.done need their own lockdep class
 * keys if they are defined on stack with lockdep enabled.  Use the
 * following macros when defining them on stack.
 */
#ifdef CONFIG_LOCKDEP
# define KTHREAD_WORKER_INIT_ONSTACK(worker)                \
    ({ init_kthread_worker(&worker); worker; })
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker)              \
    struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
# define KTHREAD_WORK_INIT_ONSTACK(work, fn)                \
    ({ init_kthread_work((&work), fn); work; })
# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn)              \
    struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn)
#else
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn)
#endif

extern void __init_kthread_worker(struct kthread_worker *worker,
            const char *name, struct lock_class_key *key);

#define init_kthread_worker(worker)                 \
    do {                                \
        static struct lock_class_key __key;         \
        __init_kthread_worker((worker), "("#worker")->lock", &__key); \
    } while (0)

#define init_kthread_work(work, fn)                 \
    do {                                \
        memset((work), 0, sizeof(struct kthread_work));     \
        INIT_LIST_HEAD(&(work)->node);              \
        (work)->func = (fn);                    \
        init_waitqueue_head(&(work)->done);         \
    } while (0)

int kthread_worker_fn(void *worker_ptr);

bool queue_kthread_work(struct kthread_worker *worker,
            struct kthread_work *work);
void flush_kthread_work(struct kthread_work *work);
void flush_kthread_worker(struct kthread_worker *worker);

#endif /* _LINUX_KTHREAD_H */
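The second half of this header declares the kthread_worker / kthread_work helpers described in the comment above: a dedicated kthread runs kthread_worker_fn() and drains work items queued with queue_kthread_work(). Below is a minimal sketch of how those pieces fit together under this (pre-4.9 era) API; my_worker, my_work and my_work_fn are hypothetical names chosen for illustration.

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/printk.h>

static DEFINE_KTHREAD_WORKER(my_worker);
static struct kthread_work my_work;
static struct task_struct *my_worker_task;

static void my_work_fn(struct kthread_work *work)
{
    printk("kthread_work item processed\n");
}

static int my_worker_start(void)
{
    /* kthread_worker_fn() is the loop that drains the worker's work_list. */
    my_worker_task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
    if (IS_ERR(my_worker_task))
        return PTR_ERR(my_worker_task);

    init_kthread_work(&my_work, my_work_fn);
    queue_kthread_work(&my_worker, &my_work);   /* hand the work item to the worker */
    flush_kthread_worker(&my_worker);           /* wait until everything queued has run */
    return 0;
}

static void my_worker_stop(void)
{
    kthread_stop(my_worker_task);   /* kthread_worker_fn() returns when asked to stop */
}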

The task_struct structure:

/* kernel/include/linux/sched.h (abridged; the full definition runs to several
 * hundred lines, so only the most commonly referenced fields are shown here) */
struct task_struct {
    volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
    void *stack;
    atomic_t usage;
    unsigned int flags;     /* per process flags, defined below */
    unsigned int ptrace;

    int on_rq;
    int prio, static_prio, normal_prio;
    unsigned int rt_priority;
    const struct sched_class *sched_class;
    struct sched_entity se;
    struct sched_rt_entity rt;

    unsigned int policy;
    int nr_cpus_allowed;
    cpumask_t cpus_allowed;

    struct list_head tasks;
    struct mm_struct *mm, *active_mm;

/* task state */
    int exit_state;
    int exit_code, exit_signal;
    int pdeath_signal;  /* The signal sent when the parent dies */

    pid_t pid;
    pid_t tgid;

    struct task_struct __rcu *real_parent; /* real parent process */
    struct task_struct __rcu *parent;      /* recipient of SIGCHLD, wait4() reports */
    struct list_head children;  /* list of my children */
    struct list_head sibling;   /* linkage in my parent's children list */
    struct task_struct *group_leader;   /* threadgroup leader */

    /* PID/PID hash table linkage. */
    struct pid_link pids[PIDTYPE_MAX];
    struct list_head thread_group;
    struct list_head thread_node;

    cputime_t utime, stime, utimescaled, stimescaled;
    unsigned long nvcsw, nivcsw;        /* context switch counts */
    struct timespec start_time;         /* monotonic time */
    struct timespec real_start_time;    /* boot based time */

/* process credentials */
    const struct cred __rcu *real_cred; /* objective and real subjective task credentials (COW) */
    const struct cred __rcu *cred;      /* effective (overridable) subjective task credentials (COW) */
    char comm[TASK_COMM_LEN];           /* executable name excluding path */

/* CPU-specific state of this task */
    struct thread_struct thread;
/* filesystem information */
    struct fs_struct *fs;
/* open file information */
    struct files_struct *files;
/* namespaces */
    struct nsproxy *nsproxy;
/* signal handlers */
    struct signal_struct *signal;
    struct sighand_struct *sighand;

    /* ... many more fields (RCU, lockdep, cgroups, NUMA, tracing, etc.) omitted;
     * see the full definition in kernel/include/linux/sched.h ... */
};
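Inside a kernel thread, the current macro points at the thread's own task_struct, so fields such as comm, pid and state can be read directly. A minimal sketch, assuming it runs as a thread function like the ones earlier in this article (show_self_fn is a hypothetical name):

#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/printk.h>

static int show_self_fn(void *data)
{
    while (!kthread_should_stop()) {
        /* "current" is this thread's own struct task_struct. */
        printk("kthread '%s' (pid %d, state %ld)\n",
               current->comm, current->pid, current->state);
        msleep(1000);
    }
    return 0;
}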