linux中断导读之--注册部分

来源:互联网 发布:docker网络详解 编辑:程序博客网 时间:2024/05/17 08:20
==================================
本文系本站原创,欢迎转载!
转载请注明出处:http://blog.csdn.net/gdt_a20

==================================

前面介绍了中断的初始化代码,稍带着执行流程也过的差不多了

这里看下我们经常碰到的中断的注册,看看和前面是怎么关联起来的,

也就是request_irq函数;

在 include/linux/interrupt.h 中定义:


/*
 * request_irq - convenience wrapper for the common, non-threaded case.
 *
 * Simply forwards to request_threaded_irq() with a NULL thread_fn, so
 * @handler runs entirely in hard interrupt context and no handler
 * thread is created.  Returns 0 on success or a negative errno.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
        const char *name, void *dev)
{
    int ret;

    /* No threaded part: thread_fn is NULL. */
    ret = request_threaded_irq(irq, handler, NULL, flags, name, dev);
    return ret;
}

参数依次为:中断号、中断处理函数、标志、设备名(name)以及会传回处理函数的设备标识(dev)

在 kernel/irq/manage.c 中,

/**
 *    request_threaded_irq - allocate an interrupt line
 *    @irq: Interrupt line to allocate
 *    @handler: Function to be called when the IRQ occurs.
 *          Primary handler for threaded interrupts
 *          If NULL and thread_fn != NULL the default
 *          primary handler is installed
 *    @thread_fn: Function called from the irq handler thread
 *            If NULL, no irq thread is created
 *    @irqflags: Interrupt type flags
 *    @devname: An ascii name for the claiming device
 *    @dev_id: A cookie passed back to the handler function
 *
 *    This call allocates interrupt resources and enables the
 *    interrupt line and IRQ handling. From the point this
 *    call is made your handler function may be invoked. Since
 *    your handler function must clear any interrupt the board
 *    raises, you must take care both to initialise your hardware
 *    and to set up the interrupt handler in the right order.
 *
 *    If you want to set up a threaded irq handler for your device
 *    then you need to supply @handler and @thread_fn. @handler is
 *    still called in hard interrupt context and has to check
 *    whether the interrupt originates from the device. If yes it
 *    needs to disable the interrupt on the device and return
 *    IRQ_WAKE_THREAD which will wake up the handler thread and run
 *    @thread_fn. This split handler design is necessary to support
 *    shared interrupts.
 *
 *    Dev_id must be globally unique. Normally the address of the
 *    device data structure is used as the cookie. Since the handler
 *    receives this value it makes sense to use it.
 *
 *    If your interrupt is shared you must pass a non NULL dev_id
 *    as this is required when freeing the interrupt.
 *
 *    Flags:
 *
 *    IRQF_SHARED        Interrupt is shared
 *    IRQF_SAMPLE_RANDOM    The interrupt can be used for entropy
 *    IRQF_TRIGGER_*        Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
             irq_handler_t thread_fn, unsigned long irqflags,
             const char *devname, void *dev_id)
{
    struct irqaction *action;
    struct irq_desc *desc;
    int retval;

    /*
     * Sanity-check: shared interrupts must pass in a real dev-ID,
     * otherwise we'll have trouble later trying to figure out
     * which interrupt is which (messes up the interrupt freeing
     * logic etc).
     */
    if ((irqflags & IRQF_SHARED) && !dev_id)              /* shared lines need dev_id to tell actions apart */
        return -EINVAL;

    desc = irq_to_desc(irq);                                               /* look up the descriptor for this irq line */
    if (!desc)
        return -EINVAL;

    /* Reject lines that may not be requested, or that are per-cpu-devid. */
    if (!irq_settings_can_request(desc) ||
        WARN_ON(irq_settings_is_per_cpu_devid(desc)))
        return -EINVAL;

    /*
     * A NULL primary handler is only valid together with a thread_fn;
     * in that case install the default primary handler.
     */
    if (!handler) {
        if (!thread_fn)
            return -EINVAL;
        handler = irq_default_primary_handler;
    }

    action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);    /* allocate and zero a new irqaction */
    if (!action)
        return -ENOMEM;

    action->handler = handler;                            
    action->thread_fn = thread_fn;
    action->flags = irqflags;
    action->name = devname;
    action->dev_id = dev_id;

    /* Serialize against the irq chip bus while wiring up the action. */
    chip_bus_lock(desc);
    retval = __setup_irq(irq, desc, action);                               /* the real registration work happens here */
    chip_bus_sync_unlock(desc);

    if (retval)
        kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
    if (!retval && (irqflags & IRQF_SHARED)) {
        /*
         * It's a shared IRQ -- the driver ought to be prepared for it
         * to happen immediately, so let's make sure....
         * We disable the irq to make sure that a 'real' IRQ doesn't
         * run in parallel with our fake.
         */
        unsigned long flags;

        disable_irq(irq);
        local_irq_save(flags);

        handler(irq, dev_id);

        local_irq_restore(flags);
        enable_irq(irq);
    }
#endif
    return retval;
}
EXPORT_SYMBOL(request_threaded_irq);

同样位于 kernel/irq/manage.c 中:

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
    struct irqaction *old, **old_ptr;
    const char *old_name = NULL;
    unsigned long flags, thread_mask = 0;
    int ret, nested, shared = 0;
    cpumask_var_t mask;

    if (!desc)
        return -EINVAL;

    if (desc->irq_data.chip == &no_irq_chip)
        return -ENOSYS;
    if (!try_module_get(desc->owner))
        return -ENODEV;
    /*
     * Some drivers like serial.c use request_irq() heavily,
     * so we have to be careful not to interfere with a
     * running system.
     */
    if (new->flags & IRQF_SAMPLE_RANDOM) {
        /*
         * This function might sleep, we want to call it first,
         * outside of the atomic block.
         * Yes, this might clear the entropy pool if the wrong
         * driver is attempted to be loaded, without actually
         * installing a new handler, but is this really a problem,
         * only the sysadmin is able to do this.
         */
        rand_initialize_irq(irq);
    }

    /*
     * Check whether the interrupt nests into another interrupt
     * thread.
     */
    nested = irq_settings_is_nested_thread(desc);
    if (nested) {
        if (!new->thread_fn) {
            ret = -EINVAL;
            goto out_mput;
        }
        /*
         * Replace the primary handler which was provided from
         * the driver for non nested interrupt handling by the
         * dummy function which warns when called.
         */
        new->handler = irq_nested_primary_handler;
    } else {
        if (irq_settings_can_thread(desc))
            irq_setup_forced_threading(new);
    }

    /*
     * Create a handler thread when a thread function is supplied
     * and the interrupt does not nest into another interrupt
     * thread.
     */
    if (new->thread_fn && !nested) {
        struct task_struct *t;

        t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
                   new->name);
        if (IS_ERR(t)) {
            ret = PTR_ERR(t);
            goto out_mput;
        }
        /*
         * We keep the reference to the task struct even if
         * the thread dies to avoid that the interrupt code
         * references an already freed task_struct.
         */
        get_task_struct(t);
        new->thread = t;
    }

    if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
        ret = -ENOMEM;
        goto out_thread;
    }

    /*
     * The following block of code has to be executed atomically
     */
    raw_spin_lock_irqsave(&desc->lock, flags);
    old_ptr = &desc->action;
    old = *old_ptr;
    if (old) {
        /*
         * Can't share interrupts unless both agree to and are
         * the same type (level, edge, polarity). So both flag
         * fields must have IRQF_SHARED set and the bits which
         * set the trigger type must match. Also all must
         * agree on ONESHOT.
         */
        if (!((old->flags & new->flags) & IRQF_SHARED) ||
            ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
            ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
            old_name = old->name;
            goto mismatch;
        }

        /* All handlers must agree on per-cpuness */
        if ((old->flags & IRQF_PERCPU) !=
            (new->flags & IRQF_PERCPU))
            goto mismatch;

        /* add new interrupt at end of irq queue */
        do {
            thread_mask |= old->thread_mask;
            old_ptr = &old->next;
            old = *old_ptr;
        } while (old);
        shared = 1;
    }

    /*
     * Setup the thread mask for this irqaction. Unlikely to have
     * 32 resp 64 irqs sharing one line, but who knows.
     */
    if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
        ret = -EBUSY;
        goto out_mask;
    }
    new->thread_mask = 1 << ffz(thread_mask);

    if (!shared) {
        init_waitqueue_head(&desc->wait_for_threads);

        /* Setup the type (level, edge polarity) if configured: */
        if (new->flags & IRQF_TRIGGER_MASK) {
            ret = __irq_set_trigger(desc, irq,
                    new->flags & IRQF_TRIGGER_MASK);

            if (ret)
                goto out_mask;
        }

        desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
                  IRQS_ONESHOT | IRQS_WAITING);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

        if (new->flags & IRQF_PERCPU) {
            irqd_set(&desc->irq_data, IRQD_PER_CPU);
            irq_settings_set_per_cpu(desc);
        }

        if (new->flags & IRQF_ONESHOT)
            desc->istate |= IRQS_ONESHOT;

        if (irq_settings_can_autoenable(desc))
            irq_startup(desc);
        else
            /* Undo nested disables: */
            desc->depth = 1;

        /* Exclude IRQ from balancing if requested */
        if (new->flags & IRQF_NOBALANCING) {
            irq_settings_set_no_balancing(desc);
            irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
        }

        /* Set default affinity mask once everything is setup */
        setup_affinity(irq, desc, mask);

    } else if (new->flags & IRQF_TRIGGER_MASK) {
        unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
        unsigned int omsk = irq_settings_get_trigger_mask(desc);

        if (nmsk != omsk)
            /* hope the handler works with current  trigger mode */
            pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
                   irq, nmsk, omsk);
    }

    new->irq = irq;
    *old_ptr = new;

    /* Reset broken irq detection when installing new handler */
    desc->irq_count = 0;
    desc->irqs_unhandled = 0;

    /*
     * Check whether we disabled the irq via the spurious handler
     * before. Reenable it and give it another chance.
     */
    if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
        desc->istate &= ~IRQS_SPURIOUS_DISABLED;
        __enable_irq(desc, irq, false);
    }

    raw_spin_unlock_irqrestore(&desc->lock, flags);

    /*
     * Strictly no need to wake it up, but hung_task complains
     * when no hard interrupt wakes the thread up.
     */
    if (new->thread)
        wake_up_process(new->thread);

    register_irq_proc(irq, desc);
    new->dir = NULL;
    register_handler_proc(irq, new);
    free_cpumask_var(mask);

    return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
    if (!(new->flags & IRQF_PROBE_SHARED)) {
        printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
        if (old_name)
            printk(KERN_ERR "current handler: %s\n", old_name);
        dump_stack();
    }
#endif
    ret = -EBUSY;

out_mask:
    raw_spin_unlock_irqrestore(&desc->lock, flags);
    free_cpumask_var(mask);

out_thread:
    if (new->thread) {
        struct task_struct *t = new->thread;

        new->thread = NULL;
        if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
            kthread_stop(t);
        put_task_struct(t);
    }
out_mput:
    module_put(desc->owner);
    return ret;
}

//如果是share会挂接新的action,如果是新irq,会关联该action。