// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY   10
#define MT_VEND_REQ_TOUT_MS 300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

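/* Issue a USB vendor control request, retrying up to
 * MT_VEND_REQ_MAX_RETRY times on transient errors. The caller must
 * hold usb.usb_ctrl_mtx. Returns the number of bytes transferred or a
 * negative errno; -EIO is returned once the device has been removed.
 */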
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
               u16 val, u16 offset, void *buf, size_t len)
{
    struct usb_interface *uintf = to_usb_interface(dev->dev);
    struct usb_device *udev = interface_to_usbdev(uintf);
    unsigned int pipe;
    int i, ret;

    lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

    pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
                       : usb_sndctrlpipe(udev, 0);
    for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
        if (test_bit(MT76_REMOVED, &dev->phy.state))
            return -EIO;

        ret = usb_control_msg(udev, pipe, req, req_type, val,
                      offset, buf, len, MT_VEND_REQ_TOUT_MS);
        if (ret == -ENODEV)
            set_bit(MT76_REMOVED, &dev->phy.state);
        if (ret >= 0 || ret == -ENODEV)
            return ret;
        usleep_range(5000, 10000);
    }

    dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
        req, offset, ret);
    return ret;
}
EXPORT_SYMBOL_GPL(__mt76u_vendor_request);

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
             u8 req_type, u16 val, u16 offset,
             void *buf, size_t len)
{
    int ret;

    mutex_lock(&dev->usb.usb_ctrl_mtx);
    ret = __mt76u_vendor_request(dev, req, req_type,
                     val, offset, buf, len);
    trace_usb_reg_wr(dev, offset, val);
    mutex_unlock(&dev->usb.usb_ctrl_mtx);

    return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

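/* Read a 32-bit register via a vendor request; returns ~0 if the
 * request fails.
 */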
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
{
    struct mt76_usb *usb = &dev->usb;
    u32 data = ~0;
    int ret;

    ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
                     addr, usb->data, sizeof(__le32));
    if (ret == sizeof(__le32))
        data = get_unaligned_le32(usb->data);
    trace_usb_reg_rr(dev, addr, data);

    return data;
}
EXPORT_SYMBOL_GPL(___mt76u_rr);

static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
    u8 req;

    switch (addr & MT_VEND_TYPE_MASK) {
    case MT_VEND_TYPE_EEPROM:
        req = MT_VEND_READ_EEPROM;
        break;
    case MT_VEND_TYPE_CFG:
        req = MT_VEND_READ_CFG;
        break;
    default:
        req = MT_VEND_MULTI_READ;
        break;
    }

    return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
               addr & ~MT_VEND_TYPE_MASK);
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
    u32 ret;

    mutex_lock(&dev->usb.usb_ctrl_mtx);
    ret = __mt76u_rr(dev, addr);
    mutex_unlock(&dev->usb.usb_ctrl_mtx);

    return ret;
}

void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
         u32 addr, u32 val)
{
    struct mt76_usb *usb = &dev->usb;

    put_unaligned_le32(val, usb->data);
    __mt76u_vendor_request(dev, req, req_type, addr >> 16,
                   addr, usb->data, sizeof(__le32));
    trace_usb_reg_wr(dev, addr, val);
}
EXPORT_SYMBOL_GPL(___mt76u_wr);

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
    u8 req;

    switch (addr & MT_VEND_TYPE_MASK) {
    case MT_VEND_TYPE_CFG:
        req = MT_VEND_WRITE_CFG;
        break;
    default:
        req = MT_VEND_MULTI_WRITE;
        break;
    }
    ___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
            addr & ~MT_VEND_TYPE_MASK, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
    mutex_lock(&dev->usb.usb_ctrl_mtx);
    __mt76u_wr(dev, addr, val);
    mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
             u32 mask, u32 val)
{
    mutex_lock(&dev->usb.usb_ctrl_mtx);
    val |= __mt76u_rr(dev, addr) & ~mask;
    __mt76u_wr(dev, addr, val);
    mutex_unlock(&dev->usb.usb_ctrl_mtx);

    return val;
}

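/* Write a buffer to consecutive registers, splitting it into chunks of
 * at most usb->data_len bytes per vendor request.
 */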
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
               const void *data, int len)
{
    struct mt76_usb *usb = &dev->usb;
    const u8 *val = data;
    int ret;
    int current_batch_size;
    int i = 0;

    /* Ensure that a multiple of 4 bytes is always copied,
     * otherwise beacons can be corrupted.
     * See: "mt76: round up length on mt76_wr_copy"
     * Commit 850e8f6fbd5d0003b0
     */
    len = round_up(len, 4);

    mutex_lock(&usb->usb_ctrl_mtx);
    while (i < len) {
        current_batch_size = min_t(int, usb->data_len, len - i);
        memcpy(usb->data, val + i, current_batch_size);
        ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
                         USB_DIR_OUT | USB_TYPE_VENDOR,
                         0, offset + i, usb->data,
                         current_batch_size);
        if (ret < 0)
            break;

        i += current_batch_size;
    }
    mutex_unlock(&usb->usb_ctrl_mtx);
}

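/* Counterpart of mt76u_copy(): read a register range back into a
 * buffer in usb->data_len sized chunks.
 */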
void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
             void *data, int len)
{
    struct mt76_usb *usb = &dev->usb;
    int i = 0, batch_len, ret;
    u8 *val = data;

    len = round_up(len, 4);
    mutex_lock(&usb->usb_ctrl_mtx);
    while (i < len) {
        batch_len = min_t(int, usb->data_len, len - i);
        ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
                         USB_DIR_IN | USB_TYPE_VENDOR,
                         (offset + i) >> 16, offset + i,
                         usb->data, batch_len);
        if (ret < 0)
            break;

        memcpy(val + i, usb->data, batch_len);
        i += batch_len;
    }
    mutex_unlock(&usb->usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_read_copy);

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
             const u16 offset, const u32 val)
{
    mutex_lock(&dev->usb.usb_ctrl_mtx);
    __mt76u_vendor_request(dev, req,
                   USB_DIR_OUT | USB_TYPE_VENDOR,
                   val & 0xffff, offset, NULL, 0);
    __mt76u_vendor_request(dev, req,
                   USB_DIR_OUT | USB_TYPE_VENDOR,
                   val >> 16, offset + 2, NULL, 0);
    mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

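/* Write a list of register/value pairs under a single mutex
 * acquisition. Once the MCU is running, mt76u_wr_rp() delegates this
 * to the MCU ops instead.
 */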
static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
        const struct mt76_reg_pair *data, int len)
{
    struct mt76_usb *usb = &dev->usb;

    mutex_lock(&usb->usb_ctrl_mtx);
    while (len > 0) {
        __mt76u_wr(dev, base + data->reg, data->value);
        len--;
        data++;
    }
    mutex_unlock(&usb->usb_ctrl_mtx);

    return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
        const struct mt76_reg_pair *data, int n)
{
    if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
        return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
    else
        return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
        int len)
{
    struct mt76_usb *usb = &dev->usb;

    mutex_lock(&usb->usb_ctrl_mtx);
    while (len > 0) {
        data->value = __mt76u_rr(dev, base + data->reg);
        len--;
        data++;
    }
    mutex_unlock(&usb->usb_ctrl_mtx);

    return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
        struct mt76_reg_pair *data, int n)
{
    if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
        return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
    else
        return mt76u_req_rd_rp(dev, base, data, n);
}

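/* Scatter-gather is used only when it is not disabled via the module
 * parameter and the host controller supports it without a size
 * constraint (or the device runs at wireless-USB speed).
 */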
static bool mt76u_check_sg(struct mt76_dev *dev)
{
    struct usb_interface *uintf = to_usb_interface(dev->dev);
    struct usb_device *udev = interface_to_usbdev(uintf);

    return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
        (udev->bus->no_sg_constraint ||
         udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
            struct mt76_usb *usb)
{
    struct usb_host_interface *intf_desc = intf->cur_altsetting;
    struct usb_endpoint_descriptor *ep_desc;
    int i, in_ep = 0, out_ep = 0;

    for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
        ep_desc = &intf_desc->endpoint[i].desc;

        if (usb_endpoint_is_bulk_in(ep_desc) &&
            in_ep < __MT_EP_IN_MAX) {
            usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
            in_ep++;
        } else if (usb_endpoint_is_bulk_out(ep_desc) &&
               out_ep < __MT_EP_OUT_MAX) {
            usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
            out_ep++;
        }
    }

    if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
        return -EINVAL;
    return 0;
}

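/* Populate the rx urb scatterlist with page fragments. On partial
 * allocation the urb is trimmed to the fragments actually obtained;
 * returns the number of entries filled, or -ENOMEM if none.
 */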
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
         int nsgs, gfp_t gfp)
{
    int i;

    for (i = 0; i < nsgs; i++) {
        struct page *page;
        void *data;
        int offset;

        data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
        if (!data)
            break;

        page = virt_to_head_page(data);
        offset = data - page_address(page);
        sg_set_page(&urb->sg[i], page, q->buf_size, offset);
    }

    if (i < nsgs) {
        int j;

        for (j = nsgs; j < urb->num_sgs; j++)
            skb_free_frag(sg_virt(&urb->sg[j]));
        urb->num_sgs = i;
    }

    urb->num_sgs = max_t(int, i, urb->num_sgs);
    urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
    sg_init_marker(urb->sg, urb->num_sgs);

    return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
        struct urb *urb, int nsgs, gfp_t gfp)
{
    enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];

    if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
        return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

    urb->transfer_buffer_length = q->buf_size;
    urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

    return urb->transfer_buffer ? 0 : -ENOMEM;
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
        int sg_max_size)
{
    unsigned int size = sizeof(struct urb);

    if (dev->usb.sg_en)
        size += sg_max_size * sizeof(struct scatterlist);

    e->urb = kzalloc(size, GFP_KERNEL);
    if (!e->urb)
        return -ENOMEM;

    usb_init_urb(e->urb);

    if (dev->usb.sg_en && sg_max_size > 0)
        e->urb->sg = (struct scatterlist *)(e->urb + 1);

    return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
           struct mt76_queue_entry *e)
{
    enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
    int err, sg_size;

    sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
    err = mt76u_urb_alloc(dev, e, sg_size);
    if (err)
        return err;

    return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
    int i;

    for (i = 0; i < urb->num_sgs; i++)
        skb_free_frag(sg_virt(&urb->sg[i]));

    if (urb->transfer_buffer)
        skb_free_frag(urb->transfer_buffer);

    usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
            struct urb *urb, usb_complete_t complete_fn,
            void *context)
{
    struct usb_interface *uintf = to_usb_interface(dev->dev);
    struct usb_device *udev = interface_to_usbdev(uintf);
    unsigned int pipe;

    if (dir == USB_DIR_IN)
        pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
    else
        pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

    urb->dev = udev;
    urb->pipe = pipe;
    urb->complete = complete_fn;
    urb->context = context;
}

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
    struct urb *urb = NULL;
    unsigned long flags;

    spin_lock_irqsave(&q->lock, flags);
    if (q->queued > 0) {
        urb = q->entry[q->tail].urb;
        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
    }
    spin_unlock_irqrestore(&q->lock, flags);

    return urb;
}

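/* Extract and validate the DMA length from the rx buffer header.
 * Returns the payload length or -EINVAL if the header is inconsistent.
 */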
static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
               u32 data_len)
{
    u16 dma_len, min_len;

    dma_len = get_unaligned_le16(data);
    if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
        return dma_len;

    min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
    if (data_len < min_len || !dma_len ||
        dma_len + MT_DMA_HDR_LEN > data_len ||
        (dma_len & 0x3))
        return -EINVAL;
    return dma_len;
}

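/* Build an skb around a received buffer. Fast path: hand the whole
 * fragment to build_skb(). Slow path (fragment too small for the data
 * plus skb_shared_info): copy the first MT_SKB_HEAD_LEN bytes and
 * attach the remainder as a page fragment.
 */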
static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
           int len, int buf_size)
{
    int head_room, drv_flags = dev->drv->drv_flags;
    struct sk_buff *skb;

    head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
    if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
        struct page *page;

        /* slow path, not enough space for data and
         * skb_shared_info
         */
        skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
        if (!skb)
            return NULL;

        skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
        data += head_room + MT_SKB_HEAD_LEN;
        page = virt_to_head_page(data);
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                page, data - page_address(page),
                len - MT_SKB_HEAD_LEN, buf_size);

        return skb;
    }

    /* fast path */
    skb = build_skb(data, buf_size);
    if (!skb)
        return NULL;

    skb_reserve(skb, head_room);
    __skb_put(skb, len);

    return skb;
}

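/* Parse one completed rx urb into an skb, chaining any additional
 * scatterlist entries as page fragments, and pass it to the driver.
 * Returns the number of sg buffers consumed, i.e. how many need to be
 * refilled.
 */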
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
               int buf_size)
{
    u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
    int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
    int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
    struct sk_buff *skb;

    if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
        return 0;

    len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
    if (len < 0)
        return 0;

    head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
    data_len = min_t(int, len, data_len - head_room);
    skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
    if (!skb)
        return 0;

    len -= data_len;
    while (len > 0 && nsgs < urb->num_sgs) {
        data_len = min_t(int, len, urb->sg[nsgs].length);
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                sg_page(&urb->sg[nsgs]),
                urb->sg[nsgs].offset, data_len,
                buf_size);
        len -= data_len;
        nsgs++;
    }
    dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

    return nsgs;
}

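/* rx urb completion handler: runs in atomic context, so it only
 * advances the queue state and kicks the rx worker.
 */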
static void mt76u_complete_rx(struct urb *urb)
{
    struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
    struct mt76_queue *q = urb->context;
    unsigned long flags;

    trace_rx_urb(dev, urb);

    switch (urb->status) {
    case -ECONNRESET:
    case -ESHUTDOWN:
    case -ENOENT:
    case -EPROTO:
        return;
    default:
        dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                    urb->status);
        fallthrough;
    case 0:
        break;
    }

    spin_lock_irqsave(&q->lock, flags);
    if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
        goto out;

    q->head = (q->head + 1) % q->ndesc;
    q->queued++;
    mt76_worker_schedule(&dev->usb.rx_worker);
out:
    spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
            struct urb *urb)
{
    int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

    mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
                mt76u_complete_rx, &dev->q_rx[qid]);
    trace_submit_urb(dev, urb);

    return usb_submit_urb(urb, GFP_ATOMIC);
}

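/* Drain completed rx urbs for a queue, refilling and resubmitting each
 * one; for the main queue, flush pending rx frames to mac80211
 * afterwards.
 */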
static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
    int qid = q - &dev->q_rx[MT_RXQ_MAIN];
    struct urb *urb;
    int err, count;

    while (true) {
        urb = mt76u_get_next_rx_entry(q);
        if (!urb)
            break;

        count = mt76u_process_rx_entry(dev, urb, q->buf_size);
        if (count > 0) {
            err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
            if (err < 0)
                break;
        }
        mt76u_submit_rx_buf(dev, qid, urb);
    }
    if (qid == MT_RXQ_MAIN) {
        local_bh_disable();
        mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
        local_bh_enable();
    }
}

static void mt76u_rx_worker(struct mt76_worker *w)
{
    struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
    struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
    int i;

    rcu_read_lock();
    mt76_for_each_q_rx(dev, i)
        mt76u_process_rx_queue(dev, &dev->q_rx[i]);
    rcu_read_unlock();
}

static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
    struct mt76_queue *q = &dev->q_rx[qid];
    unsigned long flags;
    int i, err = 0;

    spin_lock_irqsave(&q->lock, flags);
    for (i = 0; i < q->ndesc; i++) {
        err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
        if (err < 0)
            break;
    }
    q->head = q->tail = 0;
    q->queued = 0;
    spin_unlock_irqrestore(&q->lock, flags);

    return err;
}

static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
    struct mt76_queue *q = &dev->q_rx[qid];
    int i, err;

    spin_lock_init(&q->lock);
    q->entry = devm_kcalloc(dev->dev,
                MT_NUM_RX_ENTRIES, sizeof(*q->entry),
                GFP_KERNEL);
    if (!q->entry)
        return -ENOMEM;

    q->ndesc = MT_NUM_RX_ENTRIES;
    q->buf_size = PAGE_SIZE;

    for (i = 0; i < q->ndesc; i++) {
        err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
        if (err < 0)
            return err;
    }

    return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
    return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);

static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
    struct page *page;
    int i;

    for (i = 0; i < q->ndesc; i++) {
        if (!q->entry[i].urb)
            continue;

        mt76u_urb_free(q->entry[i].urb);
        q->entry[i].urb = NULL;
    }

    if (!q->rx_page.va)
        return;

    page = virt_to_page(q->rx_page.va);
    __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
    memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
    int i;

    mt76_worker_teardown(&dev->usb.rx_worker);

    mt76_for_each_q_rx(dev, i)
        mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
    int i;

    mt76_worker_disable(&dev->usb.rx_worker);

    mt76_for_each_q_rx(dev, i) {
        struct mt76_queue *q = &dev->q_rx[i];
        int j;

        for (j = 0; j < q->ndesc; j++)
            usb_poison_urb(q->entry[j].urb);
    }
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
    int i;

    mt76_for_each_q_rx(dev, i) {
        struct mt76_queue *q = &dev->q_rx[i];
        int err, j;

        for (j = 0; j < q->ndesc; j++)
            usb_unpoison_urb(q->entry[j].urb);

        err = mt76u_submit_rx_buffers(dev, i);
        if (err < 0)
            return err;
    }

    mt76_worker_enable(&dev->usb.rx_worker);

    return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

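/* Reap tx completions for all AC queues, wake tx waiters when a queue
 * drains, kick the tx scheduler and, if the driver provides
 * tx_status_data, schedule the status work.
 */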
static void mt76u_status_worker(struct mt76_worker *w)
{
    struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
    struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
    struct mt76_queue_entry entry;
    struct mt76_queue *q;
    int i;

    for (i = 0; i < IEEE80211_NUM_ACS; i++) {
        q = dev->phy.q_tx[i];
        if (!q)
            continue;

        while (q->queued > 0) {
            if (!q->entry[q->tail].done)
                break;

            entry = q->entry[q->tail];
            q->entry[q->tail].done = false;

            mt76_queue_tx_complete(dev, q, &entry);
        }

        if (!q->queued)
            wake_up(&dev->tx_wait);

        mt76_worker_schedule(&dev->tx_worker);

        if (dev->drv->tx_status_data &&
            !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
            queue_work(dev->wq, &dev->usb.stat_work);
    }
}

static void mt76u_tx_status_data(struct work_struct *work)
{
    struct mt76_usb *usb;
    struct mt76_dev *dev;
    u8 update = 1;
    u16 count = 0;

    usb = container_of(work, struct mt76_usb, stat_work);
    dev = container_of(usb, struct mt76_dev, usb);

    while (true) {
        if (test_bit(MT76_REMOVED, &dev->phy.state))
            break;

        if (!dev->drv->tx_status_data(dev, &update))
            break;
        count++;
    }

    if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
        queue_work(dev->wq, &usb->stat_work);
    else
        clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static void mt76u_complete_tx(struct urb *urb)
{
    struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
    struct mt76_queue_entry *e = urb->context;

    if (mt76u_urb_error(urb))
        dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
    e->done = true;

    mt76_worker_schedule(&dev->usb.status_worker);
}

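/* Map an skb onto a tx urb: use the linear buffer when SG is disabled,
 * otherwise build a scatterlist from the skb fragments.
 */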
static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
               struct urb *urb)
{
    urb->transfer_buffer_length = skb->len;

    if (!dev->usb.sg_en) {
        urb->transfer_buffer = skb->data;
        return 0;
    }

    sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
    urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
    if (!urb->num_sgs)
        return -ENOMEM;

    return urb->num_sgs;
}

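/* Enqueue an skb on a tx queue: let the driver prepare the frame, bind
 * it to the urb at the queue head and advance the ring. The urb is
 * submitted later via mt76u_tx_kick().
 */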
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
           enum mt76_txq_id qid, struct sk_buff *skb,
           struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
    struct mt76_tx_info tx_info = {
        .skb = skb,
    };
    u16 idx = q->head;
    int err;

    if (q->queued == q->ndesc)
        return -ENOSPC;

    skb->prev = skb->next = NULL;
    err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
    if (err < 0)
        return err;

    err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
    if (err < 0)
        return err;

    mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
                q->entry[idx].urb, mt76u_complete_tx,
                &q->entry[idx]);

    q->head = (q->head + 1) % q->ndesc;
    q->entry[idx].skb = tx_info.skb;
    q->entry[idx].wcid = 0xffff;
    q->queued++;

    return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
    struct urb *urb;
    int err;

    while (q->first != q->head) {
        urb = q->entry[q->first].urb;

        trace_submit_urb(dev, urb);
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err < 0) {
            if (err == -ENODEV)
                set_bit(MT76_REMOVED, &dev->phy.state);
            else
                dev_err(dev->dev, "tx urb submit failed:%d\n",
                    err);
            break;
        }
        q->first = (q->first + 1) % q->ndesc;
    }
}

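/* Map a mac80211 AC to a hardware queue; MT7663 (0x7663) uses its own
 * LMAC mapping, all other chips use the generic helper.
 */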
static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
{
    if (mt76_chip(dev) == 0x7663) {
        static const u8 lmac_queue_map[] = {
            /* ac to lmac mapping */
            [IEEE80211_AC_BK] = 0,
            [IEEE80211_AC_BE] = 1,
            [IEEE80211_AC_VI] = 2,
            [IEEE80211_AC_VO] = 4,
        };

        if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
            return 1; /* BE */

        return lmac_queue_map[ac];
    }

    return mt76_ac_to_hwq(ac);
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
    struct mt76_queue *q;
    int i, j, err;

    for (i = 0; i <= MT_TXQ_PSD; i++) {
        if (i >= IEEE80211_NUM_ACS) {
            dev->phy.q_tx[i] = dev->phy.q_tx[0];
            continue;
        }

        q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
        if (!q)
            return -ENOMEM;

        spin_lock_init(&q->lock);
        q->hw_idx = mt76u_ac_to_hwq(dev, i);

        dev->phy.q_tx[i] = q;

        q->entry = devm_kcalloc(dev->dev,
                    MT_NUM_TX_ENTRIES, sizeof(*q->entry),
                    GFP_KERNEL);
        if (!q->entry)
            return -ENOMEM;

        q->ndesc = MT_NUM_TX_ENTRIES;
        for (j = 0; j < q->ndesc; j++) {
            err = mt76u_urb_alloc(dev, &q->entry[j],
                          MT_TX_SG_MAX_SIZE);
            if (err < 0)
                return err;
        }
    }
    return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
    int i;

    mt76_worker_teardown(&dev->usb.status_worker);

    for (i = 0; i < IEEE80211_NUM_ACS; i++) {
        struct mt76_queue *q;
        int j;

        q = dev->phy.q_tx[i];
        if (!q)
            continue;

        for (j = 0; j < q->ndesc; j++) {
            usb_free_urb(q->entry[j].urb);
            q->entry[j].urb = NULL;
        }
    }
}

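/* Stop tx processing: wait up to HZ / 5 for pending frames to drain,
 * then kill any outstanding urbs and manually complete skbs that can
 * no longer be submitted.
 */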
void mt76u_stop_tx(struct mt76_dev *dev)
{
    int ret;

    mt76_worker_disable(&dev->usb.status_worker);

    ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
                 HZ / 5);
    if (!ret) {
        struct mt76_queue_entry entry;
        struct mt76_queue *q;
        int i, j;

        dev_err(dev->dev, "timed out waiting for pending tx\n");

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
            q = dev->phy.q_tx[i];
            if (!q)
                continue;

            for (j = 0; j < q->ndesc; j++)
                usb_kill_urb(q->entry[j].urb);
        }

        mt76_worker_disable(&dev->tx_worker);

        /* On device removal we might queue skbs, but mt76u_tx_kick()
         * will fail to submit urbs; clean up those skbs manually.
         */
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
            q = dev->phy.q_tx[i];
            if (!q)
                continue;

            while (q->queued > 0) {
                entry = q->entry[q->tail];
                q->entry[q->tail].done = false;
                mt76_queue_tx_complete(dev, q, &entry);
            }
        }

        mt76_worker_enable(&dev->tx_worker);
    }

    cancel_work_sync(&dev->usb.stat_work);
    clear_bit(MT76_READING_STATS, &dev->phy.state);

    mt76_worker_enable(&dev->usb.status_worker);

    mt76_tx_status_check(dev, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
    mt76u_stop_rx(dev);
    mt76u_stop_tx(dev);

    mt76u_free_rx(dev);
    mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
    int err;

    err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
    if (err < 0)
        return err;

    return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
    .tx_queue_skb = mt76u_tx_queue_skb,
    .kick = mt76u_tx_kick,
};

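/* Common USB initialization: allocate the control transfer buffer,
 * install the bus and queue ops, discover the bulk endpoints and start
 * the rx and status workers.
 */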
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
         struct mt76_bus_ops *ops)
{
    struct usb_device *udev = interface_to_usbdev(intf);
    struct mt76_usb *usb = &dev->usb;
    int err;

    INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

    usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
    if (usb->data_len < 32)
        usb->data_len = 32;

    usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
    if (!usb->data)
        return -ENOMEM;

    mutex_init(&usb->usb_ctrl_mtx);
    dev->bus = ops;
    dev->queue_ops = &usb_queue_ops;

    dev_set_drvdata(&udev->dev, dev);

    usb->sg_en = mt76u_check_sg(dev);

    err = mt76u_set_endpoints(intf, usb);
    if (err < 0)
        return err;

    err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
                "usb-rx");
    if (err)
        return err;

    err = mt76_worker_setup(dev->hw, &usb->status_worker,
                mt76u_status_worker, "usb-status");
    if (err)
        return err;

    sched_set_fifo_low(usb->rx_worker.task);
    sched_set_fifo_low(usb->status_worker.task);

    return 0;
}
EXPORT_SYMBOL_GPL(__mt76u_init);

int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
{
    static struct mt76_bus_ops bus_ops = {
        .rr = mt76u_rr,
        .wr = mt76u_wr,
        .rmw = mt76u_rmw,
        .read_copy = mt76u_read_copy,
        .write_copy = mt76u_copy,
        .wr_rp = mt76u_wr_rp,
        .rd_rp = mt76u_rd_rp,
        .type = MT76_BUS_USB,
    };

    return __mt76u_init(dev, intf, &bus_ops);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");