// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is based on mt76/usb.c.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *     Lorenzo Bianconi <lorenzo@kernel.org>
 *     Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/sched.h>
#include <linux/kthread.h>

#include "mt76.h"
#include "sdio.h"

static u32 mt76s_read_whisr(struct mt76_dev *dev)
{
    return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
}

u32 mt76s_read_pcr(struct mt76_dev *dev)
{
    struct mt76_sdio *sdio = &dev->sdio;

    return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
}
EXPORT_SYMBOL_GPL(mt76s_read_pcr);

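/* Read a register through the H2D/D2H mailbox: write the target offset
 * to MCR_H2DSM0R, raise the H2D_SW_INT_READ software interrupt via
 * MCR_WSICR, poll WHISR for the firmware ack, then fetch the value from
 * MCR_D2HRM1R. The offset echoed back in MCR_H2DSM0R is checked to catch
 * a stale response; ~0 is returned on any failure.
 */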
static u32 mt76s_read_mailbox(struct mt76_dev *dev, u32 offset)
{
    struct sdio_func *func = dev->sdio.func;
    u32 val = ~0, status;
    int err;

    sdio_claim_host(func);

    sdio_writel(func, offset, MCR_H2DSM0R, &err);
    if (err < 0) {
        dev_err(dev->dev, "failed setting address [err=%d]\n", err);
        goto out;
    }

    sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
    if (err < 0) {
        dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
        goto out;
    }

    err = readx_poll_timeout(mt76s_read_whisr, dev, status,
                 status & H2D_SW_INT_READ, 0, 1000000);
    if (err < 0) {
        dev_err(dev->dev, "query whisr timeout\n");
        goto out;
    }

    sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
    if (err < 0) {
        dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
        goto out;
    }

    val = sdio_readl(func, MCR_H2DSM0R, &err);
    if (err < 0) {
        dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
        goto out;
    }

    if (val != offset) {
        dev_err(dev->dev, "register mismatch\n");
        val = ~0;
        goto out;
    }

    val = sdio_readl(func, MCR_D2HRM1R, &err);
    if (err < 0)
        dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);

out:
    sdio_release_host(func);

    return val;
}

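/* Write a register through the mailbox: the offset goes to MCR_H2DSM0R
 * and the value to MCR_H2DSM1R before raising H2D_SW_INT_WRITE. As in
 * the read path, WHISR is polled for the firmware ack and the echoed
 * offset is verified.
 */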
static void mt76s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
{
    struct sdio_func *func = dev->sdio.func;
    u32 status;
    int err;

    sdio_claim_host(func);

    sdio_writel(func, offset, MCR_H2DSM0R, &err);
    if (err < 0) {
        dev_err(dev->dev, "failed setting address [err=%d]\n", err);
        goto out;
    }

    sdio_writel(func, val, MCR_H2DSM1R, &err);
    if (err < 0) {
        dev_err(dev->dev,
            "failed setting write value [err=%d]\n", err);
        goto out;
    }

    sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
    if (err < 0) {
        dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
        goto out;
    }

    err = readx_poll_timeout(mt76s_read_whisr, dev, status,
                 status & H2D_SW_INT_WRITE, 0, 1000000);
    if (err < 0) {
        dev_err(dev->dev, "query whisr timeout\n");
        goto out;
    }

    sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
    if (err < 0) {
        dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
        goto out;
    }

    val = sdio_readl(func, MCR_H2DSM0R, &err);
    if (err < 0) {
        dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
        goto out;
    }

    if (val != offset)
        dev_err(dev->dev, "register mismatch\n");

out:
    sdio_release_host(func);
}

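/* Register accessors plugged into mt76_bus_ops: once the MCU is running,
 * accesses are routed through the MCU command path (mcu_rr/mcu_wr);
 * until then the mailbox handshake above is used directly.
 */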
u32 mt76s_rr(struct mt76_dev *dev, u32 offset)
{
    if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
        return dev->mcu_ops->mcu_rr(dev, offset);
    else
        return mt76s_read_mailbox(dev, offset);
}
EXPORT_SYMBOL_GPL(mt76s_rr);

void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
    if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
        dev->mcu_ops->mcu_wr(dev, offset, val);
    else
        mt76s_write_mailbox(dev, offset, val);
}
EXPORT_SYMBOL_GPL(mt76s_wr);

u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
    val |= mt76s_rr(dev, offset) & ~mask;
    mt76s_wr(dev, offset, val);

    return val;
}
EXPORT_SYMBOL_GPL(mt76s_rmw);

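/* Bulk variants built on the single-register accessors: copy a buffer
 * to/from a contiguous register window word by word, and program or read
 * back arrays of (register, value) pairs.
 */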
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
              const void *data, int len)
{
    const u32 *val = data;
    int i;

    for (i = 0; i < len / sizeof(u32); i++) {
        mt76s_wr(dev, offset, val[i]);
        offset += sizeof(u32);
    }
}
EXPORT_SYMBOL_GPL(mt76s_write_copy);

void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
             void *data, int len)
{
    u32 *val = data;
    int i;

    for (i = 0; i < len / sizeof(u32); i++) {
        val[i] = mt76s_rr(dev, offset);
        offset += sizeof(u32);
    }
}
EXPORT_SYMBOL_GPL(mt76s_read_copy);

int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
        const struct mt76_reg_pair *data,
        int len)
{
    int i;

    for (i = 0; i < len; i++) {
        mt76s_wr(dev, data->reg, data->value);
        data++;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(mt76s_wr_rp);

int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
        struct mt76_reg_pair *data, int len)
{
    int i;

    for (i = 0; i < len; i++) {
        data->value = mt76s_rr(dev, data->reg);
        data++;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(mt76s_rd_rp);

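/* Bring up the SDIO function: enable it, request driver ownership via
 * MCR_WHLPCR, set a 512-byte block size, unmask TX/RX done interrupts
 * in MCR_WHIER (RX1 exists only on CONNAC2), configure WHISR read-clear
 * and the RX aggregation length in MCR_WHCR, then install the IRQ
 * handler.
 */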
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, int hw_ver)
{
    u32 status, ctrl;
    int ret;

    dev->sdio.hw_ver = hw_ver;

    sdio_claim_host(func);

    ret = sdio_enable_func(func);
    if (ret < 0)
        goto release;

    /* Get ownership from the device */
    sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
            MCR_WHLPCR, &ret);
    if (ret < 0)
        goto disable_func;

    ret = readx_poll_timeout(mt76s_read_pcr, dev, status,
                 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
    if (ret < 0) {
        dev_err(dev->dev, "Cannot get ownership from device\n");
        goto disable_func;
    }

    ret = sdio_set_block_size(func, 512);
    if (ret < 0)
        goto disable_func;

    /* Enable interrupt */
    sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
    if (ret < 0)
        goto disable_func;

    ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
    if (hw_ver == MT76_CONNAC2_SDIO)
        ctrl |= WHIER_RX1_DONE_INT_EN;
    sdio_writel(func, ctrl, MCR_WHIER, &ret);
    if (ret < 0)
        goto disable_func;

    switch (hw_ver) {
    case MT76_CONNAC_SDIO:
        /* set WHISR as read clear and Rx aggregation number as 16 */
        ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
        break;
    default:
        ctrl = sdio_readl(func, MCR_WHCR, &ret);
        if (ret < 0)
            goto disable_func;
        ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2;
        ctrl &= ~W_INT_CLR_CTRL; /* read clear */
        ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0);
        break;
    }

    sdio_writel(func, ctrl, MCR_WHCR, &ret);
    if (ret < 0)
        goto disable_func;

    ret = sdio_claim_irq(func, mt76s_sdio_irq);
    if (ret < 0)
        goto disable_func;

    sdio_release_host(func);

    return 0;

disable_func:
    sdio_disable_func(func);
release:
    sdio_release_host(func);

    return ret;
}
EXPORT_SYMBOL_GPL(mt76s_hw_init);

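/* Allocate the software RX ring for one RX queue. SDIO has no hardware
 * descriptor ring, so only the entry array and head/tail bookkeeping
 * are needed.
 */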
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
    struct mt76_queue *q = &dev->q_rx[qid];

    spin_lock_init(&q->lock);
    q->entry = devm_kcalloc(dev->dev,
                MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
                GFP_KERNEL);
    if (!q->entry)
        return -ENOMEM;

    q->ndesc = MT76S_NUM_RX_ENTRIES;
    q->head = q->tail = 0;
    q->queued = 0;

    return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_rx_queue);

static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
{
    struct mt76_queue *q;

    q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
    if (!q)
        return ERR_PTR(-ENOMEM);

    spin_lock_init(&q->lock);
    q->entry = devm_kcalloc(dev->dev,
                MT76S_NUM_TX_ENTRIES, sizeof(*q->entry),
                GFP_KERNEL);
    if (!q->entry)
        return ERR_PTR(-ENOMEM);

    q->ndesc = MT76S_NUM_TX_ENTRIES;

    return q;
}

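/* Allocate all TX rings: one per data queue up to MT_TXQ_PSD, plus the
 * MT_MCUQ_WM ring for MCU commands.
 */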
int mt76s_alloc_tx(struct mt76_dev *dev)
{
    struct mt76_queue *q;
    int i;

    for (i = 0; i <= MT_TXQ_PSD; i++) {
        q = mt76s_alloc_tx_queue(dev);
        if (IS_ERR(q))
            return PTR_ERR(q);

        dev->phy.q_tx[i] = q;
    }

    q = mt76s_alloc_tx_queue(dev);
    if (IS_ERR(q))
        return PTR_ERR(q);

    dev->q_mcu[MT_MCUQ_WM] = q;

    return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_tx);

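/* RX bottom half: entries are popped from the ring tail under the queue
 * lock and the frames handed to the driver's rx_skb() hook. The net
 * worker keeps draining all RX queues until a pass completes without
 * processing any frame.
 */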
static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
    struct mt76_queue_entry *e = NULL;

    spin_lock_bh(&q->lock);
    if (q->queued > 0) {
        e = &q->entry[q->tail];
        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
    }
    spin_unlock_bh(&q->lock);

    return e;
}

static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
    int qid = q - &dev->q_rx[MT_RXQ_MAIN];
    int nframes = 0;

    while (true) {
        struct mt76_queue_entry *e;

        if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
            break;

        e = mt76s_get_next_rx_entry(q);
        if (!e || !e->skb)
            break;

        dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
        e->skb = NULL;
        nframes++;
    }
    if (qid == MT_RXQ_MAIN)
        mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

    return nframes;
}

static void mt76s_net_worker(struct mt76_worker *w)
{
    struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
                          net_worker);
    struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
    int i, nframes;

    do {
        nframes = 0;

        local_bh_disable();
        rcu_read_lock();

        mt76_for_each_q_rx(dev, i)
            nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);

        rcu_read_unlock();
        local_bh_enable();
    } while (nframes > 0);
}

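/* Reap completed TX entries from the ring tail. Frames on the MCU queue
 * are freed here directly; data frames are passed to
 * mt76_queue_tx_complete() for status reporting. Waiters on tx_wait are
 * woken once the queue drains.
 */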
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
    struct mt76_queue_entry entry;
    int nframes = 0;
    bool mcu;

    if (!q)
        return 0;

    mcu = q == dev->q_mcu[MT_MCUQ_WM];
    while (q->queued > 0) {
        if (!q->entry[q->tail].done)
            break;

        entry = q->entry[q->tail];
        q->entry[q->tail].done = false;

        if (mcu) {
            dev_kfree_skb(entry.skb);
            entry.skb = NULL;
        }

        mt76_queue_tx_complete(dev, q, &entry);
        nframes++;
    }

    if (!q->queued)
        wake_up(&dev->tx_wait);

    return nframes;
}

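/* TX status worker: reaps completions on the MCU and data queues,
 * schedules stat_work to collect TX status from the device when the
 * driver provides tx_status_data(), and reschedules the txrx worker if
 * any data frames completed so that pending frames can make progress.
 */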
static void mt76s_status_worker(struct mt76_worker *w)
{
    struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
                          status_worker);
    struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
    bool resched = false;
    int i, nframes;

    do {
        int ndata_frames = 0;

        nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);

        for (i = 0; i <= MT_TXQ_PSD; i++)
            ndata_frames += mt76s_process_tx_queue(dev,
                                   dev->phy.q_tx[i]);
        nframes += ndata_frames;
        if (ndata_frames > 0)
            resched = true;

        if (dev->drv->tx_status_data &&
            !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
            !test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
            queue_work(dev->wq, &dev->sdio.stat_work);
    } while (nframes > 0);

    if (resched)
        mt76_worker_schedule(&dev->sdio.txrx_worker);
}

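/* Work item that polls the driver's tx_status_data() hook until no
 * status is pending, rescheduling itself while the interface is still
 * running.
 */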
static void mt76s_tx_status_data(struct work_struct *work)
{
    struct mt76_sdio *sdio;
    struct mt76_dev *dev;
    u8 update = 1;
    u16 count = 0;

    sdio = container_of(work, struct mt76_sdio, stat_work);
    dev = container_of(sdio, struct mt76_dev, sdio);

    while (true) {
        if (test_bit(MT76_REMOVED, &dev->phy.state))
            break;

        if (!dev->drv->tx_status_data(dev, &update))
            break;
        count++;
    }

    if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
        queue_work(dev->wq, &sdio->stat_work);
    else
        clear_bit(MT76_READING_STATS, &dev->phy.state);
}

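/* Enqueue a data frame: the driver builds the TX descriptor in
 * tx_prepare_skb(), then the entry is published at the ring head. This
 * path appears to rely on the caller holding the queue lock; the write
 * barrier orders the entry fields before head/queued are updated.
 */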
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
           enum mt76_txq_id qid, struct sk_buff *skb,
           struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
    struct mt76_tx_info tx_info = {
        .skb = skb,
    };
    int err, len = skb->len;
    u16 idx = q->head;

    if (q->queued == q->ndesc)
        return -ENOSPC;

    skb->prev = skb->next = NULL;
    err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
    if (err < 0)
        return err;

    q->entry[q->head].skb = tx_info.skb;
    q->entry[q->head].buf_sz = len;
    q->entry[q->head].wcid = 0xffff;

    smp_wmb();

    q->head = (q->head + 1) % q->ndesc;
    q->queued++;

    return idx;
}

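/* Enqueue an MCU command frame without a TX descriptor: pad the skb to
 * a 4-byte boundary and publish it under the queue lock. On error the
 * skb is consumed here.
 */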
static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
               struct sk_buff *skb, u32 tx_info)
{
    int ret = -ENOSPC, len = skb->len, pad;

    if (q->queued == q->ndesc)
        goto error;

    pad = round_up(skb->len, 4) - skb->len;
    ret = mt76_skb_adjust_pad(skb, pad);
    if (ret)
        goto error;

    spin_lock_bh(&q->lock);

    q->entry[q->head].buf_sz = len;
    q->entry[q->head].skb = skb;
    q->head = (q->head + 1) % q->ndesc;
    q->queued++;

    spin_unlock_bh(&q->lock);

    return 0;

error:
    dev_kfree_skb(skb);

    return ret;
}

static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
    struct mt76_sdio *sdio = &dev->sdio;

    mt76_worker_schedule(&sdio->txrx_worker);
}

static const struct mt76_queue_ops sdio_queue_ops = {
    .tx_queue_skb = mt76s_tx_queue_skb,
    .kick = mt76s_tx_kick,
    .tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};

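/* Tear down the SDIO transport: stop all workers, flush pending status
 * work, release the IRQ and free any RX skbs still sitting in the
 * rings. The queue memory itself is devm-managed.
 */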
void mt76s_deinit(struct mt76_dev *dev)
{
    struct mt76_sdio *sdio = &dev->sdio;
    int i;

    mt76_worker_teardown(&sdio->txrx_worker);
    mt76_worker_teardown(&sdio->status_worker);
    mt76_worker_teardown(&sdio->net_worker);

    cancel_work_sync(&sdio->stat_work);
    clear_bit(MT76_READING_STATS, &dev->phy.state);

    mt76_tx_status_check(dev, true);

    sdio_claim_host(sdio->func);
    sdio_release_irq(sdio->func);
    sdio_release_host(sdio->func);

    mt76_for_each_q_rx(dev, i) {
        struct mt76_queue *q = &dev->q_rx[i];
        int j;

        for (j = 0; j < q->ndesc; j++) {
            struct mt76_queue_entry *e = &q->entry[j];

            if (!e->skb)
                continue;

            dev_kfree_skb(e->skb);
            e->skb = NULL;
        }
    }
}
EXPORT_SYMBOL_GPL(mt76s_deinit);

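/* Set up the SDIO transport: spawn the status and net workers (both at
 * low real-time FIFO priority), register the queue/bus ops and size the
 * xmit bounce buffer from the host controller limits, capped at
 * MT76S_XMIT_BUF_SZ.
 */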
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
           const struct mt76_bus_ops *bus_ops)
{
    struct mt76_sdio *sdio = &dev->sdio;
    u32 host_max_cap;
    int err;

    err = mt76_worker_setup(dev->hw, &sdio->status_worker,
                mt76s_status_worker, "sdio-status");
    if (err)
        return err;

    err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
                "sdio-net");
    if (err)
        return err;

    sched_set_fifo_low(sdio->status_worker.task);
    sched_set_fifo_low(sdio->net_worker.task);

    INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);

    dev->queue_ops = &sdio_queue_ops;
    dev->bus = bus_ops;
    dev->sdio.func = func;

    host_max_cap = min_t(u32, func->card->host->max_req_size,
                 func->cur_blksize *
                 func->card->host->max_blk_count);
    dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
    dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
                      GFP_KERNEL);
    if (!dev->sdio.xmit_buf)
        err = -ENOMEM;

    return err;
}
EXPORT_SYMBOL_GPL(mt76s_init);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");