// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/module.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>

#include "trace.h"
#include "sdio.h"
#include "mt76.h"
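
/* Accumulate the per-AC PSE/PLE and MCU page counts reported in the
 * WTQCR interrupt status words into the TX scheduler quota.
 * Returns the total number of credited pages, 0 if nothing changed.
 */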
static int mt76s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
{
	u32 ple_ac_data_quota[] = {
		FIELD_GET(TXQ_CNT_L, data[4]), /* VO */
		FIELD_GET(TXQ_CNT_H, data[3]), /* VI */
		FIELD_GET(TXQ_CNT_L, data[3]), /* BE */
		FIELD_GET(TXQ_CNT_H, data[2]), /* BK */
	};
	u32 pse_ac_data_quota[] = {
		FIELD_GET(TXQ_CNT_H, data[1]), /* VO */
		FIELD_GET(TXQ_CNT_L, data[1]), /* VI */
		FIELD_GET(TXQ_CNT_H, data[0]), /* BE */
		FIELD_GET(TXQ_CNT_L, data[0]), /* BK */
	};
	u32 pse_mcu_quota = FIELD_GET(TXQ_CNT_L, data[2]);
	u32 pse_data_quota = 0, ple_data_quota = 0;
	struct mt76_sdio *sdio = &dev->sdio;
	int i;

	for (i = 0; i < ARRAY_SIZE(pse_ac_data_quota); i++) {
		pse_data_quota += pse_ac_data_quota[i];
		ple_data_quota += ple_ac_data_quota[i];
	}

	if (!pse_data_quota && !ple_data_quota && !pse_mcu_quota)
		return 0;

	sdio->sched.pse_mcu_quota += pse_mcu_quota;
	sdio->sched.pse_data_quota += pse_data_quota;
	sdio->sched.ple_data_quota += ple_data_quota;

	return pse_data_quota + ple_data_quota + pse_mcu_quota;
}
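
/* Build an skb for a frame that lives in the shared RX bounce page:
 * copy at most MT_SKB_HEAD_LEN bytes into the linear head and attach
 * any remainder as a page fragment, holding an extra page reference.
 */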
static struct sk_buff *
mt76s_build_rx_skb(void *data, int data_len, int buf_len)
{
	int len = min_t(int, data_len, MT_SKB_HEAD_LEN);
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put_data(skb, data, len);
	if (data_len > len) {
		struct page *page;

		data += len;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				data_len - len, buf_len);
		get_page(page);
	}

	return skb;
}
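
/* Drain one RX queue: fetch all frames pending for @qid with a single
 * sdio_readsb() transfer into a bounce page, then slice the buffer
 * into skbs using the length field of each RX descriptor.
 * Returns the number of frames queued or a negative error code.
 */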
static int
mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
		   struct mt76s_intr *intr)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	struct mt76_sdio *sdio = &dev->sdio;
	int len = 0, err, i;
	struct page *page;
	u8 *buf;

	for (i = 0; i < intr->rx.num[qid]; i++)
		len += round_up(intr->rx.len[qid][i] + 4, 4);

	if (!len)
		return 0;

	if (len > sdio->func->cur_blksize)
		len = roundup(len, sdio->func->cur_blksize);

	page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
	if (!page)
		return -ENOMEM;

	buf = page_address(page);

	sdio_claim_host(sdio->func);
	err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
	sdio_release_host(sdio->func);

	if (err < 0) {
		dev_err(dev->dev, "sdio read data failed:%d\n", err);
		put_page(page);
		return err;
	}

	for (i = 0; i < intr->rx.num[qid]; i++) {
		int index = (q->head + i) % q->ndesc;
		struct mt76_queue_entry *e = &q->entry[index];
		__le32 *rxd = (__le32 *)buf;

		/* parse rxd to get the actual packet length */
		len = le32_get_bits(rxd[0], GENMASK(15, 0));
		e->skb = mt76s_build_rx_skb(buf, len, round_up(len + 4, 4));
		if (!e->skb)
			break;

		buf += round_up(len + 4, 4);
		if (q->queued + i + 1 == q->ndesc)
			break;
	}
	put_page(page);

	spin_lock_bh(&q->lock);
	q->head = (q->head + i) % q->ndesc;
	q->queued += i;
	spin_unlock_bh(&q->lock);

	return i;
}
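
/* Read the device interrupt status, run the RX queues whose done bits
 * are set and refill the TX scheduler quota from the WTQCR words.
 */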
static int mt76s_rx_handler(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;
	struct mt76s_intr intr;
	int nframes = 0, ret;

	ret = sdio->parse_irq(dev, &intr);
	if (ret)
		return ret;

	trace_dev_irq(dev, intr.isr, 0);

	if (intr.isr & WHIER_RX0_DONE_INT_EN) {
		ret = mt76s_rx_run_queue(dev, 0, &intr);
		if (ret > 0) {
			mt76_worker_schedule(&sdio->net_worker);
			nframes += ret;
		}
	}

	if (intr.isr & WHIER_RX1_DONE_INT_EN) {
		ret = mt76s_rx_run_queue(dev, 1, &intr);
		if (ret > 0) {
			mt76_worker_schedule(&sdio->net_worker);
			nframes += ret;
		}
	}

	nframes += !!mt76s_refill_sched_quota(dev, intr.tx.wtqcr);

	return nframes;
}
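
/* Check whether enough PSE/PLE quota remains for one more frame of
 * @buf_sz bytes and account for it in @pse_size/@ple_size. MCU frames
 * consume PSE pages only; on MT76_CONNAC2_SDIO hardware they always
 * cost a single page. Returns -EBUSY if the quota is exhausted.
 */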
static int
mt76s_tx_pick_quota(struct mt76_sdio *sdio, bool mcu, int buf_sz,
		    int *pse_size, int *ple_size)
{
	int pse_sz;

	pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit,
			      sdio->sched.pse_page_size);

	if (mcu && sdio->hw_ver == MT76_CONNAC2_SDIO)
		pse_sz = 1;

	if (mcu) {
		if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz)
			return -EBUSY;
	} else {
		if (sdio->sched.pse_data_quota < *pse_size + pse_sz ||
		    sdio->sched.ple_data_quota < *ple_size + 1)
			return -EBUSY;

		*ple_size = *ple_size + 1;
	}
	*pse_size = *pse_size + pse_sz;

	return 0;
}
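
/* Subtract the pages reserved by mt76s_tx_pick_quota() from the
 * scheduler quota once the frames have been handed to the device.
 */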
static void
mt76s_tx_update_quota(struct mt76_sdio *sdio, bool mcu, int pse_size,
		      int ple_size)
{
	if (mcu) {
		sdio->sched.pse_mcu_quota -= pse_size;
	} else {
		sdio->sched.pse_data_quota -= pse_size;
		sdio->sched.ple_data_quota -= ple_size;
	}
}
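
/* Push @len bytes to the TX data register, rounding the transfer up
 * to a multiple of the current SDIO block size where needed.
 */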
static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int err;

	if (len > sdio->func->cur_blksize)
		len = roundup(len, sdio->func->cur_blksize);

	sdio_claim_host(sdio->func);
	err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
	sdio_release_host(sdio->func);

	if (err)
		dev_err(dev->dev, "sdio write failed: %d\n", err);

	return err;
}
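
/* Aggregate pending frames from @q into the shared xmit buffer, each
 * frame padded to a 4-byte boundary and the aggregate terminated by a
 * zeroed word, then push everything in one SDIO transfer. Stop early
 * when the buffer or the PSE/PLE quota runs out. While the MCU is not
 * running yet, frames are written out individually instead.
 * Returns the number of buffers copied out or a negative error code.
 */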
static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
	bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
	struct mt76_sdio *sdio = &dev->sdio;
	u8 pad;

	while (q->first != q->head) {
		struct mt76_queue_entry *e = &q->entry[q->first];
		struct sk_buff *iter;

		smp_rmb();

		if (test_bit(MT76_MCU_RESET, &dev->phy.state))
			goto next;

		if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
			__skb_put_zero(e->skb, 4);
			err = __mt76s_xmit_queue(dev, e->skb->data,
						 e->skb->len);
			if (err)
				return err;

			goto next;
		}

		pad = roundup(e->skb->len, 4) - e->skb->len;
		if (len + e->skb->len + pad + 4 > dev->sdio.xmit_buf_sz)
			break;

		if (mt76s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
					&ple_sz))
			break;

		memcpy(sdio->xmit_buf + len, e->skb->data, skb_headlen(e->skb));
		len += skb_headlen(e->skb);
		nframes++;

		skb_walk_frags(e->skb, iter) {
			memcpy(sdio->xmit_buf + len, iter->data, iter->len);
			len += iter->len;
			nframes++;
		}

		if (unlikely(pad)) {
			memset(sdio->xmit_buf + len, 0, pad);
			len += pad;
		}
next:
		q->first = (q->first + 1) % q->ndesc;
		e->done = true;
	}

	if (nframes) {
		memset(sdio->xmit_buf + len, 0, 4);
		err = __mt76s_xmit_queue(dev, sdio->xmit_buf, len + 4);
		if (err)
			return err;
	}
	mt76s_tx_update_quota(sdio, mcu, pse_sz, ple_sz);

	mt76_worker_schedule(&sdio->status_worker);

	return nframes;
}
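
/* Main TX/RX worker: with the device interrupt masked, keep servicing
 * the per-AC TX queues, the MCU queue and the RX handler until no
 * more progress is made, then unmask the interrupt again. Waiters are
 * woken once the TX queues drain during MCU reset or suspend.
 */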
void mt76s_txrx_worker(struct mt76_sdio *sdio)
{
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i, nframes, ret;

	/* disable interrupt */
	sdio_claim_host(sdio->func);
	sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
	sdio_release_host(sdio->func);

	do {
		nframes = 0;

		/* tx */
		for (i = 0; i <= MT_TXQ_PSD; i++) {
			ret = mt76s_tx_run_queue(dev, dev->phy.q_tx[i]);
			if (ret > 0)
				nframes += ret;
		}
		ret = mt76s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
		if (ret > 0)
			nframes += ret;

		/* rx */
		ret = mt76s_rx_handler(dev);
		if (ret > 0)
			nframes += ret;

		if (test_bit(MT76_MCU_RESET, &dev->phy.state) ||
		    test_bit(MT76_STATE_SUSPEND, &dev->phy.state)) {
			if (!mt76s_txqs_empty(dev))
				continue;
			else
				wake_up(&sdio->wait);
		}
	} while (nframes > 0);

	/* enable interrupt */
	sdio_claim_host(sdio->func);
	sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
	sdio_release_host(sdio->func);
}
EXPORT_SYMBOL_GPL(mt76s_txrx_worker);
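
/* SDIO IRQ handler: mask the device interrupt and defer the actual
 * work to the txrx worker. IRQs that arrive before initialization or
 * during MCU reset are ignored.
 */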
void mt76s_sdio_irq(struct sdio_func *func)
{
	struct mt76_dev *dev = sdio_get_drvdata(func);
	struct mt76_sdio *sdio = &dev->sdio;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state) ||
	    test_bit(MT76_MCU_RESET, &dev->phy.state))
		return;

	sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
	mt76_worker_schedule(&sdio->txrx_worker);
}
EXPORT_SYMBOL_GPL(mt76s_sdio_irq);
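
/* Return true if the per-AC TX queues and the MCU WM queue have no
 * entries left to transmit.
 */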
bool mt76s_txqs_empty(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i <= MT_TXQ_PSD + 1; i++) {
		if (i <= MT_TXQ_PSD)
			q = dev->phy.q_tx[i];
		else
			q = dev->q_mcu[MT_MCUQ_WM];

		if (q->first != q->head)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(mt76s_txqs_empty);