MMC子系统调用过程浅析(Card层)

来源:互联网 发布:蓝牙耳机控制软件 编辑:程序博客网 时间:2024/06/08 00:58

MMC驱动依据结构层次分为底层驱动、守护线程、集群读写、电源管理及热插拔管理5个部分。

MMC设备在系统内核中是块设备,块设备模型可以参考以下链接:

http://blog.csdn.net/qq_33160790/article/details/77938438

Card层:


下面从mmc_blk_data->gendisk中的queue调用开始分析。

mq->queue = blk_init_queue(mmc_request_fn, lock);

md->disk->queue = md->queue.queue;

找到mmc_request_fn:

static void mmc_request_fn(struct request_queue *q){struct mmc_queue *mq = q->queuedata;struct request *req;unsigned long flags;struct mmc_context_info *cntx;if (!mq) {while ((req = blk_fetch_request(q)) != NULL) {req->cmd_flags |= REQ_QUIET;__blk_end_request_all(req, -EIO);}return;}cntx = &mq->card->host->context_info;if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {/* * New MMC request arrived when MMC thread may be * blocked on the previous request to be complete * with no current request fetched */spin_lock_irqsave(&cntx->lock, flags);if (cntx->is_waiting_last_req) {cntx->is_new_req = true;wake_up_interruptible(&cntx->wait);}spin_unlock_irqrestore(&cntx->lock, flags);} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)wake_up_process(mq->thread);}
当对块设备进行读写时,mmc_request_fn 会调用 wake_up_process(mq->thread),唤醒 mmc_queue 中 thread 成员指向的内核线程。

mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",host->index, subname ? subname : "");
可以看到该内核线程的处理函数为 mmc_queue_thread(线程名为 mmcqd/&lt;host-&gt;index&gt;&lt;subname&gt;)。

/*
 * Main loop of the per-host "mmcqd" kernel thread.
 *
 * Repeatedly fetches requests from the block-layer queue and hands
 * them to mq->issue_fn.  Two request slots (mqrq_cur / mqrq_prev) are
 * swapped after each issue so that a new request can be prepared while
 * the previous one is still in flight (async request pipeline).
 * Sleeps via schedule() when both slots are empty; mmc_request_fn
 * wakes it when new work arrives.
 *
 * @d: the struct mmc_queue for this host (passed by kthread_run).
 * Returns 0 when the thread is stopped.
 */
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	/* Allow emergency memory pools: this thread services writeback I/O. */
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		unsigned int cmd_flags = 0;

		/* Fetch the next request (if any) into the current slot. */
		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			/* Snapshot flags: issue_fn may complete/free req. */
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			/* No work: exit if asked, otherwise sleep. */
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			/* Release the semaphore while sleeping so the
			 * queue can be suspended/resumed meanwhile. */
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
mmc_queue_thread这个函数调用mmc_queue->issue_fn来处理mmc_queue->queue,注意此时req = blk_fetch_request(q),request_queue上的request也被传入mmc_queue->issue_fn函数中
md->queue.issue_fn = mmc_blk_issue_rq;
找到mmc_blk_issue_rq

/*
 * Dispatch one block-layer request (or a NULL flush-pipeline request)
 * to the appropriate handler: secure discard, discard, flush, or
 * normal read/write.  Claims the host for the first request of a
 * burst and releases it when the queue drains or after a special
 * (discard/flush) request completes.
 *
 * @mq:  the mmc queue the request came from
 * @req: the request to issue; NULL means "finish outstanding async work"
 */
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	unsigned int cmd_flags = req ? req->cmd_flags : 0;

	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_get_card(card);

	/* Make sure the card is on the partition this disk maps to. */
	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req) {
			blk_end_request_all(req, -EIO);
		}
		ret = 0;
		goto out;
	}

	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
	if (cmd_flags & REQ_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE)
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (cmd_flags & REQ_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		/* Normal read/write path (async request pipeline). */
		if (!req && host->areq) {
			/* Tell mmc_request_fn we are waiting on the last
			 * async request so it can signal new arrivals. */
			spin_lock_irqsave(&host->context_info.lock, flags);
			host->context_info.is_waiting_last_req = true;
			spin_unlock_irqrestore(&host->context_info.lock, flags);
		}
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
	     (cmd_flags & MMC_REQ_SPECIAL_MASK))
		/*
		 * Release host when there are no more requests
		 * and after special request(discard, flush) is done.
		 * In case of special request, there is no reentry to
		 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
		 */
		mmc_put_card(card);
	return ret;
}

mmc_blk_issue_rq 中处理读写请求的函数是 mmc_blk_issue_rw_rq(mq, req)。

/*
 * Issue a read/write request using the asynchronous request pipeline.
 *
 * Prepares @rqc (the new request) and passes it to mmc_start_req(),
 * which returns the *previously* started request once it completes.
 * The completion status of that previous request is then handled by
 * the switch below: success ends the request, errors trigger retries,
 * card resets, single-block fallback, or abort, depending on the
 * error class.
 *
 * @mq:  the mmc queue
 * @rqc: new request to start, or NULL to only wait for the previous
 *       async request to finish (flush-pipeline call)
 * Returns 1 on normal completion of the loop, 0 after a new-request
 * interruption, completed-early paths, or abort handling.
 */
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
	int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *req = rqc;
	struct mmc_async_req *areq;
	const u8 packed_nr = 2;
	u8 reqs = 0;

	/* Nothing new and nothing in flight: nothing to do. */
	if (!rqc && !mq->mqrq_prev->req)
		return 0;

	if (rqc)
		reqs = mmc_blk_prep_packed_list(mq, rqc);

	do {
		if (rqc) {
			/*
			 * When 4KB native sector is enabled, only 8 blocks
			 * multiple read or write is allowed
			 */
			if ((brq->data.blocks & 0x07) &&
			    (card->ext_csd.data_sector_size == 4096)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
					req->rq_disk->disk_name);
				mq_rq = mq->mqrq_cur;
				goto cmd_abort;
			}

			/* Build either a packed (multi-request) command or
			 * a plain read/write command for the new request. */
			if (reqs >= packed_nr)
				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
							    card, mq);
			else
				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;

		/* Start the new request; get back the completed previous one. */
		areq = mmc_start_req(card->host, areq, (int *) &status);
		if (!areq) {
			if (status == MMC_BLK_NEW_REQUEST)
				mq->flags |= MMC_QUEUE_NEW_REQUEST;
			return 0;
		}

		/* From here on we are handling the completed request. */
		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				ret = mmc_blk_end_packed_req(mq_rq);
				break;
			} else {
				ret = blk_end_request(req, 0,
						brq->data.bytes_xfered);
			}

			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (mmc_blk_reset(md, card->host, type))
				goto cmd_abort;
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_RETRY:
			retune_retry_done = brq->retune_retry_done;
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV ||
				mmc_packed_cmd(mq_rq->cmd_type))
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warn("%s: retrying using single block read\n",
					req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			ret = blk_end_request(req, -EIO,
						brq->data.blksz);
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_NOMEDIUM:
			goto cmd_abort;
		default:
			pr_err("%s: Unhandled return value (%d)",
					req->rq_disk->disk_name, status);
			goto cmd_abort;
		}

		if (ret) {
			/* Request not fully completed: re-prepare and
			 * resend the remaining part. */
			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				if (!mq_rq->packed->retries)
					goto cmd_abort;
				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
				mmc_start_req(card->host,
					      &mq_rq->mmc_active, NULL);
			} else {
				/*
				 * In case of an incomplete request
				 * prepare it again and resend.
				 */
				mmc_blk_rw_rq_prep(mq_rq, card,
						disable_multi, mq);
				mmc_start_req(card->host,
						&mq_rq->mmc_active, NULL);
			}
			mq_rq->brq.retune_retry_done = retune_retry_done;
		}
	} while (ret);

	return 1;

 cmd_abort:
	/* Fatal error: fail the whole (remaining) request. */
	if (mmc_packed_cmd(mq_rq->cmd_type)) {
		mmc_blk_abort_packed_req(mq_rq);
	} else {
		if (mmc_card_removed(card))
			req->cmd_flags |= REQ_QUIET;
		while (ret)
			ret = blk_end_request(req, -EIO,
					blk_rq_cur_bytes(req));
	}

 start_new_req:
	/* The previous request failed, but a new one was pending: start it. */
	if (rqc) {
		if (mmc_card_removed(card)) {
			rqc->cmd_flags |= REQ_QUIET;
			blk_end_request_all(rqc, -EIO);
		} else {
			/*
			 * If current request is packed, it needs to put back.
			 */
			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);

			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			mmc_start_req(card->host,
				      &mq->mqrq_cur->mmc_active, NULL);
		}
	}

	return 0;
}
它调用mmc_start_req来完成读写请求,而mmc_start_req是在core层。







原创粉丝点击