SCSI call flow


This post is just a record of how a SCSI command travels from the block layer down to the low-level driver; corrections and discussion are welcome. At a high level: a bio submitted through generic_make_request() is queued by __make_request(), the queue is eventually unplugged, and scsi_request_fn() prepares and dispatches the command.

 

void generic_make_request(struct bio *bio)

static inline void __generic_make_request(struct bio *bio): inside it, ret = q->make_request_fn(q, bio); invokes __make_request()

static inline void add_request(struct request_queue * q, struct request * req)

void elv_insert(struct request_queue *q, struct request *rq, int where): normally called with where = ELEVATOR_INSERT_SORT
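For context, here is a minimal sketch of how I/O enters this path. submit_one_page() and my_end_io() are hypothetical helpers, not kernel functions, and the bi_end_io prototype shown matches later 2.6 kernels (it varies across versions):

/* Minimal sketch of submitting one page of I/O into generic_make_request().
 * submit_one_page() and my_end_io() are hypothetical; 2.6-era bio API. */
static void my_end_io(struct bio *bio, int error)
{
        bio_put(bio);                        /* drop our reference */
}

static void submit_one_page(struct block_device *bdev, struct page *page,
                            sector_t sector)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;             /* in 512-byte units */
        bio_add_page(bio, page, PAGE_SIZE, 0);
        bio->bi_end_io = my_end_io;
        submit_bio(WRITE, bio);              /* ends up in generic_make_request() */
}

__make_request() either merges the bio into an existing request or allocates a new request and queues it through add_request()/elv_insert(); the make_request_fn hook itself is installed here: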

 

void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;
        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
        q->make_request_fn = mfn;
        q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
        blk_queue_hardsect_size(q, 512);
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        q->unplug_thresh = 4;              /* hmm */
        q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */
        if (q->unplug_delay == 0)
                q->unplug_delay = 1;

        /* initialize the unplug work item and its timer */
        INIT_WORK(&q->unplug_work, blk_unplug_work);

        q->unplug_timer.function = blk_unplug_timeout;
        q->unplug_timer.data = (unsigned long)q;

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
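The unplug fields set above pair with the plugging side, which arms the timer whenever the queue gets plugged. Roughly (a reconstructed sketch of the 2.6-era blk_plug_device(), trace hooks omitted; details vary by version):

/* Sketch: plugging sets QUEUE_FLAG_PLUGGED and arms the unplug timer
 * (simplified from the 2.6-era blk_plug_device()). */
void blk_plug_device(struct request_queue *q)
{
        WARN_ON(!irqs_disabled());

        /* a stopped queue must be restarted by blk_start_queue() instead */
        if (blk_queue_stopped(q))
                return;

        if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
}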

 

static void blk_unplug_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, unplug_work);

        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
                              q->rq.count[READ] + q->rq.count[WRITE]);

        q->unplug_fn(q);
}
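The timer handler installed in blk_queue_make_request() simply defers to this work item. A sketch (simplified; the real 2.6-era blk_unplug_timeout() also emits a blktrace event):

/* Sketch: the unplug timer just schedules blk_unplug_work() on kblockd. */
static void blk_unplug_timeout(unsigned long data)
{
        struct request_queue *q = (struct request_queue *)data;

        kblockd_schedule_work(&q->unplug_work);
}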

blk_unplug_work() then triggers q->unplug_fn(q). blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) initializes q->unplug_fn = generic_unplug_device; so the call lands in generic_unplug_device():

void generic_unplug_device(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        __generic_unplug_device(q);
        spin_unlock_irq(q->queue_lock);
}

void __generic_unplug_device(struct request_queue *q)
{
        if (unlikely(blk_queue_stopped(q)))
                return;
        if (!blk_remove_plug(q))
                return;

        q->request_fn(q);
}
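blk_remove_plug() is the inverse of blk_plug_device(): it clears the plug flag and cancels the pending timer, returning nonzero only if the queue was actually plugged. A reconstructed sketch (not verbatim):

/* Sketch: clear QUEUE_FLAG_PLUGGED and cancel the unplug timer. */
int blk_remove_plug(struct request_queue *q)
{
        WARN_ON(!irqs_disabled());

        if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
                return 0;

        del_timer(&q->unplug_timer);
        return 1;
}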

q->request_fn(q) ends up calling scsi_request_fn(). The binding is established when the SCSI midlayer allocates the queue:

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
        struct request_queue *q;

        q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
        if (!q)
                return NULL;

        blk_queue_prep_rq(q, scsi_prep_fn);
        blk_queue_softirq_done(q, scsi_softirq_done);
        return q;
}

__scsi_alloc_queue(struct Scsi_Host *shost, request_fn_proc *request_fn) passes scsi_request_fn to blk_init_queue(), which in turn calls blk_init_queue_node(), shown after the sketch below.
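A sketch of that intermediate step (simplified from the 2.6-era __scsi_alloc_queue(); the real function applies a few more host-specific limits). blk_init_queue() itself just forwards: return blk_init_queue_node(rfn, lock, -1);

/* Sketch: hand scsi_request_fn to the block layer and apply the
 * host's DMA/segment limits (simplified; details vary by version). */
struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
                                         request_fn_proc *request_fn)
{
        struct request_queue *q;

        q = blk_init_queue(request_fn, NULL);   /* -> blk_init_queue_node() */
        if (!q)
                return NULL;

        blk_queue_max_hw_segments(q, shost->sg_tablesize);
        blk_queue_max_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        return q;
}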

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
        struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

        if (!q)
                return NULL;

        q->node = node_id;
        if (blk_init_free_list(q)) {
                kmem_cache_free(requestq_cachep, q);
                return NULL;
        }

        /*
         * if caller didn't supply a lock, they get per-queue locking with
         * our embedded lock
         */
        if (!lock) {
                spin_lock_init(&q->__queue_lock);
                lock = &q->__queue_lock;
        }

        q->request_fn   = rfn;  /* the key assignment: rfn is scsi_request_fn here */
        q->prep_rq_fn   = NULL;
        q->unplug_fn    = generic_unplug_device;
        q->queue_flags  = (1 << QUEUE_FLAG_CLUSTER);
        q->queue_lock   = lock;

        blk_queue_segment_boundary(q, 0xffffffff);

        blk_queue_make_request(q, __make_request);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

        q->sg_reserved_size = INT_MAX;

        /*
         * all done
         */
        if (!elevator_init(q, NULL)) {
                blk_queue_congestion_threshold(q);
                return q;
        }

        blk_put_queue(q);
        return NULL;
}
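Before reading scsi_request_fn() below, note that it is an elaborate instance of the canonical 2.6-era request_fn pattern. A minimal sketch of that pattern for comparison (my_request_fn() and my_transfer() are hypothetical):

/* Minimal sketch of the canonical request_fn shape (LDD3-style).
 * my_transfer() is a hypothetical helper that moves the data. */
static void my_request_fn(struct request_queue *q)
{
        struct request *req;

        while ((req = elv_next_request(q)) != NULL) {
                if (!blk_fs_request(req)) {   /* skip non-filesystem requests */
                        end_request(req, 0);  /* fail them */
                        continue;
                }
                my_transfer(req);             /* hypothetical data mover */
                end_request(req, 1);          /* complete successfully */
        }
}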

The scsi_request_fn() function:

static void scsi_request_fn(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        struct scsi_cmnd *cmd;
        struct request *req;

        if (!sdev) {
                printk("scsi: killing requests for dead queue\n");
                while ((req = elv_next_request(q)) != NULL)
                        scsi_kill_request(req, q);
                return;
        }

        if (!get_device(&sdev->sdev_gendev))
                /* We must be tearing the block queue down already */
                return;

        /*
         * To start with, we keep looping until the queue is empty, or until
         * the host is no longer able to accept any more requests.
         */
        shost = sdev->host;
        while (!blk_queue_plugged(q)) {
                int rtn;
                /*
                 * get next queueable request.  We do this early to make sure
                 * that the request is fully prepared even if we cannot
                 * accept it.
                 */
                req = elv_next_request(q);
                if (!req || !scsi_dev_queue_ready(q, sdev))
                        break;

                if (unlikely(!scsi_device_online(sdev))) {
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to offline device\n");
                        scsi_kill_request(req, q);
                        continue;
                }

                /*
                 * Remove the request from the request list.
                 */
                if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
                        blkdev_dequeue_request(req);
                sdev->device_busy++;

                spin_unlock(q->queue_lock);
                cmd = req->special; /* cmd from here, coming from sd.c */
                if (unlikely(cmd == NULL)) {
                        printk(KERN_CRIT "impossible request in %s.\n"
                                         "please mail a stack trace to "
                                         "linux-scsi@vger.kernel.org\n",
                                         __FUNCTION__);
                        blk_dump_rq_flags(req, "foo");
                        BUG();
                }
                spin_lock(shost->host_lock);

                if (!scsi_host_queue_ready(q, shost, sdev))
                        goto not_ready;
                if (sdev->single_lun) {
                        if (scsi_target(sdev)->starget_sdev_user &&
                            scsi_target(sdev)->starget_sdev_user != sdev)
                                goto not_ready;
                        scsi_target(sdev)->starget_sdev_user = sdev;
                }
                shost->host_busy++;

                /*
                 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
                 *           take the lock again.
                 */
                spin_unlock_irq(shost->host_lock);

                /*
                 * Finally, initialize any error handling parameters, and set up
                 * the timers for timeouts.
                 */
                scsi_init_cmd_errh(cmd);

                /*
                 * Dispatch the command to the low-level driver.
                 */
                rtn = scsi_dispatch_cmd(cmd);
                spin_lock_irq(q->queue_lock);
                if (rtn) {
                        /* we're refusing the command; because of
                         * the way locks get dropped, we need to
                         * check here if plugging is required */
                        if (sdev->device_busy == 0)
                                blk_plug_device(q);

                        break;
                }
        }

        goto out;

 not_ready:
        spin_unlock_irq(shost->host_lock);

        /*
         * lock q, handle tag, requeue req, and decrement device_busy. We
         * must return with queue_lock held.
         *
         * Decrementing device_busy without checking it is OK, as all such
         * cases (host limits or settings) should run the queue at some
         * later time.
         */
        spin_lock_irq(q->queue_lock);
        blk_requeue_request(q, req);
        sdev->device_busy--;
        if (sdev->device_busy == 0)
                blk_plug_device(q);
 out:
        /* must be careful here...if we trigger the ->remove() function
         * we cannot be holding the q lock */
        spin_unlock_irq(q->queue_lock);
        put_device(&sdev->sdev_gendev);
        spin_lock_irq(q->queue_lock);
}
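The last hop before the hardware is scsi_dispatch_cmd(), which hands the command to the low-level driver through the host template. A heavily simplified sketch (state checks and the finer-grained error handling are omitted; details vary across 2.6.x):

/* Heavily simplified sketch of scsi_dispatch_cmd(): arm the command
 * timer, then call the LLDD's queuecommand() with scsi_done as the
 * completion callback. */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        unsigned long flags;
        int rtn;

        scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

        spin_lock_irqsave(host->host_lock, flags);
        rtn = host->hostt->queuecommand(cmd, scsi_done);
        spin_unlock_irqrestore(host->host_lock, flags);

        if (rtn)
                /* LLDD is busy: requeue the command for a later retry */
                scsi_queue_insert(cmd, SCSI_MLQUEUE_HOST_BUSY);
        return rtn;
}

Completion then runs the reverse path: the LLDD calls scsi_done() from its interrupt handler, which completes the request via blk_complete_request(); the block softirq finally invokes the scsi_softirq_done() that scsi_alloc_queue() registered above.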

Inside elv_next_request(), the line ret = q->prep_rq_fn(q, rq); invokes the queue's prep function, which is either scsi_prep_fn() or sd_prep_fn().
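The relevant fragment of elv_next_request() looks roughly like this (a simplified sketch of the prep dispatch; the real loop also handles REQ_STARTED and partial deferrals):

/* Sketch of the prep dispatch inside elv_next_request() (simplified). */
struct request *rq;
int ret;

while ((rq = __elv_next_request(q)) != NULL) {
        if (!q->prep_rq_fn)
                break;

        ret = q->prep_rq_fn(q, rq);           /* scsi_prep_fn or sd_prep_fn */
        if (ret == BLKPREP_OK) {
                break;                        /* ready for q->request_fn */
        } else if (ret == BLKPREP_DEFER) {
                rq = NULL;                    /* out of resources, retry later */
                break;
        } else if (ret == BLKPREP_KILL) {
                /* complete the request as failed and keep looping */
        }
}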

The prep function is registered in two places. First, scsi_alloc_queue(), shown above, calls blk_queue_prep_rq(q, scsi_prep_fn);

second, once sd binds to the device, sd_probe() calls blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);.

 

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        int ret = BLKPREP_KILL;

        if (req->cmd_type == REQ_TYPE_BLOCK_PC)
                ret = scsi_setup_blk_pc_cmnd(sdev, req);
        return scsi_prep_return(q, req, ret);
}

scsi_prep_fn() only builds pass-through (REQ_TYPE_BLOCK_PC) SCSI commands and kills everything else; it is the prep function in effect before the sd module has claimed the device. Once sd registers via sd_probe(), sd_prep_fn() takes over: it builds the READ/WRITE SCSI commands for filesystem requests and still uses scsi_setup_blk_pc_cmnd() for block-PC requests, as outlined in the sketch below.
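A heavily simplified sketch of the shape of sd_prep_fn() for a filesystem read/write. sd_prep_fn_sketch() is illustrative only; the real function also handles 6- and 16-byte CDBs, non-512-byte hardware sectors, capacity and alignment checks, timeouts, and the completion hook:

/* Sketch: build a 10-byte READ/WRITE CDB from the request's start
 * sector and length (big-endian LBA and transfer length). */
static int sd_prep_fn_sketch(struct request_queue *q, struct request *rq)
{
        struct scsi_device *sdp = q->queuedata;
        struct scsi_cmnd *SCpnt;
        sector_t block = rq->sector;
        unsigned int this_count = rq->nr_sectors;
        int ret;

        if (rq->cmd_type == REQ_TYPE_BLOCK_PC)          /* pass-through */
                return scsi_setup_blk_pc_cmnd(sdp, rq);

        ret = scsi_setup_fs_cmnd(sdp, rq);              /* allocates rq->special */
        if (ret != BLKPREP_OK)
                return ret;
        SCpnt = rq->special;

        SCpnt->cmnd[0] = rq_data_dir(rq) == WRITE ? WRITE_10 : READ_10;
        SCpnt->cmnd[2] = (unsigned char)(block >> 24) & 0xff;     /* LBA */
        SCpnt->cmnd[3] = (unsigned char)(block >> 16) & 0xff;
        SCpnt->cmnd[4] = (unsigned char)(block >> 8) & 0xff;
        SCpnt->cmnd[5] = (unsigned char)block & 0xff;
        SCpnt->cmnd[7] = (unsigned char)(this_count >> 8) & 0xff; /* length */
        SCpnt->cmnd[8] = (unsigned char)this_count & 0xff;
        SCpnt->cmd_len = 10;

        return BLKPREP_OK;
}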

