// SPDX-License-Identifier: GPL-2.0
/*
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS

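/*
 * Re-dispatch a request that got -EBUSY from request_atomic(), this time
 * from workqueue (non-atomic) context via the host's ->request() hook.
 */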
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}

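/*
 * Dispatch the next queued request to the host controller, unless a request
 * is already running, the queue is halted for recovery, or there is nothing
 * left to pump.
 */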
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request. */
	if (hsq->mrq || hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped. */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returns -EBUSY, the card may be busy now, so
	 * switch to non-atomic context to try again for this unusual case,
	 * which avoids time-consuming operations in atomic context.
	 *
	 * Note: we just warn for other error cases, since the host driver
	 * will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}

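/*
 * Select the tag of the next request to dispatch, or HSQ_INVALID_TAG if the
 * software queue is empty.
 */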
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	struct hsq_slot *slot;
	int tag;

	/*
	 * If there are no remaining requests in the software queue, set an
	 * invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		return;
	}

	/*
	 * Increase the next tag and check if the corresponding request is
	 * available; if so, we have found a candidate request.
	 */
	if (++hsq->next_tag != HSQ_INVALID_TAG) {
		slot = &hsq->slot[hsq->next_tag];
		if (slot->mrq)
			return;
	}

	/* Otherwise iterate over all slots to find an available tag. */
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		slot = &hsq->slot[tag];
		if (slot->mrq)
			break;
	}

	if (tag == HSQ_NUM_SLOTS)
		tag = HSQ_INVALID_TAG;

	hsq->next_tag = tag;
}

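/*
 * Complete the bookkeeping for the current request: pick the next tag, wake
 * up anyone waiting for the queue to become idle, and pump the next request
 * unless recovery is in progress.
 */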
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump a new request in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request to the host controller as fast as
	 * possible after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the completed slot's request to make room for a new request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

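/* Halt the software queue so that no new requests are dispatched. */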
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

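/* Leave recovery mode and restart dispatching of any queued requests. */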
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

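/*
 * Queue a new request into the slot indexed by its tag, then try to pump it
 * to the host controller immediately.
 */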
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Use the current request's tag as the next tag if there is no
	 * valid next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG)
		hsq->next_tag = tag;

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}

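/* Forward post-request cleanup to the host driver, if it implements it. */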
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

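/*
 * Report the queue as idle when nothing is in flight and nothing is queued.
 * A recovery halt also terminates the wait, with *ret set to -EBUSY.
 */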
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

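/* Block until the software queue drains, or recovery halts it (-EBUSY). */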
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}

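/*
 * Wait (up to 500ms) for the queue to drain, then mark it disabled so that
 * further requests are rejected with -ESHUTDOWN.
 */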
static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}

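/* Mark the software queue as enabled; fails with -EBUSY if already enabled. */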
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};

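/**
 * mmc_hsq_init - initialize the software queue and hook it up to a host
 * @hsq: the software queue to initialize
 * @mmc: the host controller
 *
 * Allocate the request slots and register the software queue as the host's
 * command-queue engine via @mmc->cqe_ops.
 *
 * Return 0 on success, or -ENOMEM if slot allocation fails.
 */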
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);

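/**
 * mmc_hsq_suspend - disable the software queue before system suspend
 * @mmc: the host controller
 */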
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

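/**
 * mmc_hsq_resume - re-enable the software queue after system resume
 * @mmc: the host controller
 *
 * Return 0 on success, or -EBUSY if the queue is already enabled.
 */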
int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);

MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");
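
/*
 * Example usage (an illustrative sketch, not part of this file): a host
 * controller driver allocates a struct mmc_hsq during probe, registers it
 * with mmc_hsq_init(), and calls mmc_hsq_finalize_request() from its
 * completion path. The probe context below is hypothetical.
 *
 *	// In the host driver's probe function:
 *	struct mmc_hsq *hsq;
 *
 *	hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
 *	if (!hsq)
 *		return -ENOMEM;
 *
 *	ret = mmc_hsq_init(hsq, mmc);
 *	if (ret)
 *		return ret;
 *
 *	// In the host's request-done path, let the software queue try to
 *	// finalize the request first:
 *	if (mmc_hsq_finalize_request(mmc, mrq))
 *		return;
 */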