// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

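/* Bounds-checked wrapper around ieee80211_hdrlen(): returns 0 instead of
 * letting a malformed frame claim a header longer than the buffer.  The
 * 10-byte minimum is the shortest valid 802.11 header (an ACK/CTS
 * control frame).
 */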
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
    const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
    unsigned int hdrlen;

    if (unlikely(len < 10))
        return 0;
    hdrlen = ieee80211_hdrlen(hdr->frame_control);
    if (unlikely(hdrlen > len))
        return 0;
    return hdrlen;
}

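/* Build an skb for one received frame.  For paged RX (p != NULL) only a
 * small 128-byte skb is allocated and the 802.11 header (plus 8 bytes)
 * is copied into it; the payload stays in the RX page and is attached as
 * a fragment, avoiding a large memcpy.  Hardware may insert 2 bytes of
 * padding after the header (MT_RXINFO_L2PAD), which is stripped here by
 * copying the header and the payload separately.
 */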
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
            void *data, u32 seg_len, u32 truesize, struct page *p)
{
    struct sk_buff *skb;
    u32 true_len, hdr_len = 0, copy, frag;

    skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
    if (!skb)
        return NULL;

    true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
    if (!true_len || true_len > seg_len)
        goto bad_frame;

    hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
    if (!hdr_len)
        goto bad_frame;

    if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
        skb_put_data(skb, data, hdr_len);

        data += hdr_len + 2;
        true_len -= hdr_len;
        hdr_len = 0;
    }
    /* If not doing paged RX, the allocated skb will always have enough space */
    copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
    frag = true_len - copy;

    skb_put_data(skb, data, copy);
    data += copy;

    if (frag) {
        skb_add_rx_frag(skb, 0, p, data - page_address(p),
                frag, truesize);
        get_page(p);
    }

    return skb;

bad_frame:
    dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
                true_len, hdr_len);
    dev_kfree_skb(skb);
    return NULL;
}

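/* Parse one DMA segment.  The layout, as consumed below, is:
 *
 *   | MT_DMA_HDR_LEN | struct mt7601u_rxwi | 802.11 frame | MT_FCE_INFO_LEN |
 *
 * i.e. a DMA header in front, the RXWI descriptor, the frame itself and
 * the FCE info word appended at the end of the segment.
 */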
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
                   u32 seg_len, struct page *p,
                   struct list_head *list)
{
    struct sk_buff *skb;
    struct mt7601u_rxwi *rxwi;
    u32 fce_info, truesize = seg_len;

    /* DMA_INFO field at the beginning of the segment contains only some of
     * the information; we need to read the FCE descriptor from the end.
     */
    fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
    seg_len -= MT_FCE_INFO_LEN;

    data += MT_DMA_HDR_LEN;
    seg_len -= MT_DMA_HDR_LEN;

    rxwi = (struct mt7601u_rxwi *) data;
    data += sizeof(struct mt7601u_rxwi);
    seg_len -= sizeof(struct mt7601u_rxwi);

    if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
        dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
    if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
        dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

    trace_mt_rx(dev, rxwi, fce_info);

    skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
    if (!skb)
        return;

    local_bh_disable();
    rcu_read_lock();

    ieee80211_rx_list(dev->hw, NULL, skb, list);

    rcu_read_unlock();
    local_bh_enable();
}

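/* A single URB may carry several segments aggregated by the hardware.
 * Each segment starts with a little-endian 16-bit DMA length; a zero
 * length, one that is not 4-byte aligned, or one that would run past the
 * remaining data marks the end of the valid part of the buffer.
 */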
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
    u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
        sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
    u16 dma_len = get_unaligned_le16(data);

    if (data_len < min_seg_len ||
        WARN_ON_ONCE(!dma_len) ||
        WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
        WARN_ON_ONCE(dma_len & 0x3))
        return 0;

    return MT_DMA_HDRS + dma_len;
}

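/* Process all segments of a completed RX buffer.  Buffers holding more
 * than 512 bytes are handed up as page fragments and the ring entry gets
 * a freshly allocated page; smaller buffers are copied into skbs so the
 * same page can be resubmitted right away.
 */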
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
    u32 seg_len, data_len = e->urb->actual_length;
    u8 *data = page_address(e->p);
    struct page *new_p = NULL;
    LIST_HEAD(list);
    int cnt = 0;

    if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
        return;

    /* Copy if there is very little data in the buffer. */
    if (data_len > 512)
        new_p = dev_alloc_pages(MT_RX_ORDER);

    while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
        mt7601u_rx_process_seg(dev, data, seg_len,
                       new_p ? e->p : NULL, &list);

        data_len -= seg_len;
        data += seg_len;
        cnt++;
    }

    if (cnt > 1)
        trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

    netif_receive_skb_list(&list);

    if (new_p) {
        /* we have one extra ref from the allocator */
        put_page(e->p);
        e->p = new_p;
    }
}

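/* Pop the oldest completed entry off the RX ring, or NULL if none is
 * pending.  rx_lock protects the ring indices against the URB completion
 * handler running concurrently.
 */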
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
    struct mt7601u_rx_queue *q = &dev->rx_q;
    struct mt7601u_dma_buf_rx *buf = NULL;
    unsigned long flags;

    spin_lock_irqsave(&dev->rx_lock, flags);

    if (!q->pending)
        goto out;

    buf = &q->e[q->start];
    q->pending--;
    q->start = (q->start + 1) % q->entries;
out:
    spin_unlock_irqrestore(&dev->rx_lock, flags);

    return buf;
}

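/* RX URB completion callback.  It runs in interrupt context, so it only
 * marks the entry as pending and defers all processing to the RX
 * tasklet.
 */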
static void mt7601u_complete_rx(struct urb *urb)
{
    struct mt7601u_dev *dev = urb->context;
    struct mt7601u_rx_queue *q = &dev->rx_q;
    unsigned long flags;

    /* do not schedule the RX tasklet if the urb has been unlinked
     * or the device has been removed
     */
    switch (urb->status) {
    case -ECONNRESET:
    case -ESHUTDOWN:
    case -ENOENT:
    case -EPROTO:
        return;
    default:
        dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                    urb->status);
        fallthrough;
    case 0:
        break;
    }

    spin_lock_irqsave(&dev->rx_lock, flags);
    if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
        goto out;

    q->end = (q->end + 1) % q->entries;
    q->pending++;
    tasklet_schedule(&dev->rx_tasklet);
out:
    spin_unlock_irqrestore(&dev->rx_lock, flags);
}

static void mt7601u_rx_tasklet(struct tasklet_struct *t)
{
    struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet);
    struct mt7601u_dma_buf_rx *e;

    while ((e = mt7601u_rx_get_pending_entry(dev))) {
        if (e->urb->status)
            continue;

        mt7601u_rx_process_entry(dev, e);
        mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
    }
}

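/* TX URB completion: queue the skb for status reporting in the TX
 * tasklet and retire the oldest ring entry.  The mac80211 queue is woken
 * once usage drops an eighth below capacity, presumably to avoid
 * wake/stop thrashing when the ring hovers around full.
 */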
static void mt7601u_complete_tx(struct urb *urb)
{
    struct mt7601u_tx_queue *q = urb->context;
    struct mt7601u_dev *dev = q->dev;
    struct sk_buff *skb;
    unsigned long flags;

    switch (urb->status) {
    case -ECONNRESET:
    case -ESHUTDOWN:
    case -ENOENT:
    case -EPROTO:
        return;
    default:
        dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
                    urb->status);
        fallthrough;
    case 0:
        break;
    }

    spin_lock_irqsave(&dev->tx_lock, flags);
    if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
        goto out;

    skb = q->e[q->start].skb;
    q->e[q->start].skb = NULL;
    trace_mt_tx_dma_done(dev, skb);

    __skb_queue_tail(&dev->tx_skb_done, skb);
    tasklet_schedule(&dev->tx_tasklet);

    if (q->used == q->entries - q->entries / 8)
        ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

    q->start = (q->start + 1) % q->entries;
    q->used--;
out:
    spin_unlock_irqrestore(&dev->tx_lock, flags);
}

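/* Report TX status for completed skbs.  The done-list is spliced onto a
 * local queue under tx_lock so mt7601u_tx_status() can run without the
 * lock held; a delayed stats readout is kicked off at the same time.
 */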
static void mt7601u_tx_tasklet(struct tasklet_struct *t)
{
    struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet);
    struct sk_buff_head skbs;
    unsigned long flags;

    __skb_queue_head_init(&skbs);

    spin_lock_irqsave(&dev->tx_lock, flags);

    set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
    if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
        queue_delayed_work(dev->stat_wq, &dev->stat_work,
                   msecs_to_jiffies(10));

    skb_queue_splice_init(&dev->tx_skb_done, &skbs);

    spin_unlock_irqrestore(&dev->tx_lock, flags);

    while (!skb_queue_empty(&skbs)) {
        struct sk_buff *skb = __skb_dequeue(&skbs);

        mt7601u_tx_status(dev, skb);
    }
}

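/* Submit one skb on the bulk-out endpoint for @ep.  This is the producer
 * side of the per-endpoint TX ring: the entry at q->end is claimed, the
 * URB submitted, and the corresponding mac80211 queue stopped once the
 * ring fills up.
 */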
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
                 struct sk_buff *skb, u8 ep)
{
    struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
    unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
    struct mt7601u_dma_buf_tx *e;
    struct mt7601u_tx_queue *q = &dev->tx_q[ep];
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&dev->tx_lock, flags);

    if (WARN_ON(q->entries <= q->used)) {
        ret = -ENOSPC;
        goto out;
    }

    e = &q->e[q->end];
    usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
              mt7601u_complete_tx, q);
    ret = usb_submit_urb(e->urb, GFP_ATOMIC);
    if (ret) {
        /* Special-handle ENODEV from TX urb submission because it will
         * often be the first ENODEV we see after the device is removed.
         */
        if (ret == -ENODEV)
            set_bit(MT7601U_STATE_REMOVED, &dev->state);
        else
            dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
                ret);
        goto out;
    }

    q->end = (q->end + 1) % q->entries;
    q->used++;
    e->skb = skb;

    if (q->used >= q->entries)
        ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
    spin_unlock_irqrestore(&dev->tx_lock, flags);

    return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
    /* TODO: take management packets to queue 5 */
    return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
    if (ep == 5)
        return MT_QSEL_MGMT;
    return MT_QSEL_EDCA;
}

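/* Entry point of the TX path: wrap the skb in the DMA packet descriptor
 * and submit it.  Frames for stations without a hardware key (hw_key_idx
 * == 0xff) additionally get the WIV flag set.  On failure the skb is
 * freed here, so callers must not touch it afterwards.
 */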
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
               struct mt76_wcid *wcid, int hw_q)
{
    u8 ep = q2ep(hw_q);
    u32 dma_flags;
    int ret;

    dma_flags = MT_TXD_PKT_INFO_80211;
    if (wcid->hw_key_idx == 0xff)
        dma_flags |= MT_TXD_PKT_INFO_WIV;

    ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
    if (ret)
        return ret;

    ret = mt7601u_dma_submit_tx(dev, skb, ep);
    if (ret) {
        ieee80211_free_txskb(dev->hw, skb);
        return ret;
    }

    return 0;
}

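/* Poisoning (rather than merely killing) the RX URBs ensures that any
 * usb_submit_urb() racing with teardown fails instead of rearming the
 * ring.
 */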
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
    int i;

    for (i = 0; i < dev->rx_q.entries; i++)
        usb_poison_urb(dev->rx_q.e[i].urb);
}

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
    struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
    u8 *buf = page_address(e->p);
    unsigned pipe;
    int ret;

    pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

    usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
              mt7601u_complete_rx, dev);

    trace_mt_submit_urb(dev, e->urb);
    ret = usb_submit_urb(e->urb, gfp);
    if (ret)
        dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

    return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
    int i, ret;

    for (i = 0; i < dev->rx_q.entries; i++) {
        ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
        if (ret)
            return ret;
    }

    return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
    int i;

    for (i = 0; i < dev->rx_q.entries; i++) {
        __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
        usb_free_urb(dev->rx_q.e[i].urb);
    }
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
    int i;

    memset(&dev->rx_q, 0, sizeof(dev->rx_q));
    dev->rx_q.dev = dev;
    dev->rx_q.entries = N_RX_ENTRIES;

    for (i = 0; i < N_RX_ENTRIES; i++) {
        dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
        dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

        if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
            return -ENOMEM;
    }

    return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
    int i;

    for (i = 0; i < q->entries; i++) {
        usb_poison_urb(q->e[i].urb);
        if (q->e[i].skb)
            mt7601u_tx_status(q->dev, q->e[i].skb);
        usb_free_urb(q->e[i].urb);
    }
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
    int i;

    if (!dev->tx_q)
        return;

    for (i = 0; i < __MT_EP_OUT_MAX; i++)
        mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
                  struct mt7601u_tx_queue *q)
{
    int i;

    q->dev = dev;
    q->entries = N_TX_ENTRIES;

    for (i = 0; i < N_TX_ENTRIES; i++) {
        q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!q->e[i].urb)
            return -ENOMEM;
    }

    return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
    int i;

    dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
                 sizeof(*dev->tx_q), GFP_KERNEL);
    if (!dev->tx_q)
        return -ENOMEM;

    for (i = 0; i < __MT_EP_OUT_MAX; i++)
        if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
            return -ENOMEM;

    return 0;
}

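/* Bring up the DMA paths: set up both tasklets, allocate the TX and RX
 * rings and prime all RX URBs.  Any failure unwinds through
 * mt7601u_dma_cleanup().
 */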
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
    int ret;

    tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet);
    tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet);

    ret = mt7601u_alloc_tx(dev);
    if (ret)
        goto err;
    ret = mt7601u_alloc_rx(dev);
    if (ret)
        goto err;

    ret = mt7601u_submit_rx(dev);
    if (ret)
        goto err;

    return 0;
err:
    mt7601u_dma_cleanup(dev);
    return ret;
}

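/* Note the teardown order: RX URBs are poisoned before the RX tasklet is
 * killed, so a late tasklet run cannot resubmit them, and the TX tasklet
 * is killed last, after mt7601u_free_tx() has reported status for any
 * skbs still sitting on the queues.
 */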
void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
    mt7601u_kill_rx(dev);

    tasklet_kill(&dev->rx_tasklet);

    mt7601u_free_rx(dev);
    mt7601u_free_tx(dev);

    tasklet_kill(&dev->tx_tasklet);
}