0001
0002
0003 #include "mt7603.h"
0004 #include "mac.h"
0005 #include "../dma.h"
0006
/*
 * Handle a frame that arrived on the MCU RX ring and is not an MCU event
 * (see mt7603_queue_rx_skb): skb->data still starts with the TX descriptor.
 * The descriptor is retargeted at the management HW queue and the frame is
 * buffered on the owning station's powersave queue — presumably for later
 * re-transmission when the station wakes up (TODO confirm against the PS
 * release path elsewhere in the driver).
 */
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
	/* 802.11 TID (0-7) -> mac80211 AC, used for skb_set_queue_mapping() */
	static const u8 tid_to_ac[8] = {
		IEEE80211_AC_BE,
		IEEE80211_AC_BK,
		IEEE80211_AC_BK,
		IEEE80211_AC_BE,
		IEEE80211_AC_VI,
		IEEE80211_AC_VI,
		IEEE80211_AC_VO,
		IEEE80211_AC_VO
	};
	__le32 *txd = (__le32 *)skb->data;
	struct ieee80211_hdr *hdr;
	struct ieee80211_sta *sta;
	struct mt7603_sta *msta;
	struct mt76_wcid *wcid;
	void *priv;
	int idx;
	u32 val;
	u8 tid = 0;

	/* Must hold at least a full TX descriptor plus an 802.11 header */
	if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
		goto free;

	val = le32_to_cpu(txd[1]);
	idx = FIELD_GET(MT_TXD1_WLAN_IDX, val);
	skb->priority = FIELD_GET(MT_TXD1_TID, val);

	/*
	 * Bounds-check the WCID index; entries at/above MT7603_WTBL_STA - 1
	 * are rejected (presumably reserved — TODO confirm in mt7603.h).
	 */
	if (idx >= MT7603_WTBL_STA - 1)
		goto free;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (!wcid)
		goto free;

	priv = msta = container_of(wcid, struct mt7603_sta, wcid);

	/* Rewrite TXD word 0: clear port/queue, target the mgmt HW queue */
	val = le32_to_cpu(txd[0]);
	val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
	val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
	txd[0] = cpu_to_le32(val);

	sta = container_of(priv, struct ieee80211_sta, drv_priv);
	hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
	/* For QoS data frames take the real TID from the QoS control field */
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = *ieee80211_get_qos_ctl(hdr) &
		      IEEE80211_QOS_CTL_TAG1D_MASK;
	skb_set_queue_mapping(skb, tid_to_ac[tid]);
	/* Tell mac80211 this station has frames buffered on this TID */
	ieee80211_sta_set_buffered(sta, tid, true);

	spin_lock_bh(&dev->ps_lock);
	__skb_queue_tail(&msta->psq, skb);
	/* Cap the PS queue at 64 frames by dropping the oldest entry */
	if (skb_queue_len(&msta->psq) >= 64) {
		skb = __skb_dequeue(&msta->psq);
		dev_kfree_skb(skb);
	}
	spin_unlock_bh(&dev->ps_lock);
	return;

free:
	dev_kfree_skb(skb);
}
0070
0071 void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
0072 struct sk_buff *skb)
0073 {
0074 struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
0075 __le32 *rxd = (__le32 *)skb->data;
0076 __le32 *end = (__le32 *)&skb->data[skb->len];
0077 enum rx_pkt_type type;
0078
0079 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
0080
0081 if (q == MT_RXQ_MCU) {
0082 if (type == PKT_TYPE_RX_EVENT)
0083 mt76_mcu_rx_event(&dev->mt76, skb);
0084 else
0085 mt7603_rx_loopback_skb(dev, skb);
0086 return;
0087 }
0088
0089 switch (type) {
0090 case PKT_TYPE_TXS:
0091 for (rxd++; rxd + 5 <= end; rxd += 5)
0092 mt7603_mac_add_txs(dev, rxd);
0093 dev_kfree_skb(skb);
0094 break;
0095 case PKT_TYPE_RX_EVENT:
0096 mt76_mcu_rx_event(&dev->mt76, skb);
0097 return;
0098 case PKT_TYPE_NORMAL:
0099 if (mt7603_mac_fill_rx(dev, skb) == 0) {
0100 mt76_rx(&dev->mt76, q, skb);
0101 return;
0102 }
0103 fallthrough;
0104 default:
0105 dev_kfree_skb(skb);
0106 break;
0107 }
0108 }
0109
0110 static int
0111 mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
0112 int idx, int n_desc, int bufsize)
0113 {
0114 int err;
0115
0116 err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
0117 MT_RX_RING_BASE);
0118 if (err < 0)
0119 return err;
0120
0121 mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));
0122
0123 return 0;
0124 }
0125
0126 static int mt7603_poll_tx(struct napi_struct *napi, int budget)
0127 {
0128 struct mt7603_dev *dev;
0129 int i;
0130
0131 dev = container_of(napi, struct mt7603_dev, mt76.tx_napi);
0132 dev->tx_dma_check = 0;
0133
0134 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
0135 for (i = MT_TXQ_PSD; i >= 0; i--)
0136 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
0137
0138 if (napi_complete_done(napi, 0))
0139 mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
0140
0141 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
0142 for (i = MT_TXQ_PSD; i >= 0; i--)
0143 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
0144
0145 mt7603_mac_sta_poll(dev);
0146
0147 mt76_worker_schedule(&dev->mt76.tx_worker);
0148
0149 return 0;
0150 }
0151
0152 int mt7603_dma_init(struct mt7603_dev *dev)
0153 {
0154 static const u8 wmm_queue_map[] = {
0155 [IEEE80211_AC_BK] = 0,
0156 [IEEE80211_AC_BE] = 1,
0157 [IEEE80211_AC_VI] = 2,
0158 [IEEE80211_AC_VO] = 3,
0159 };
0160 int ret;
0161 int i;
0162
0163 mt76_dma_attach(&dev->mt76);
0164
0165 mt76_clear(dev, MT_WPDMA_GLO_CFG,
0166 MT_WPDMA_GLO_CFG_TX_DMA_EN |
0167 MT_WPDMA_GLO_CFG_RX_DMA_EN |
0168 MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
0169 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
0170
0171 mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
0172 mt7603_pse_client_reset(dev);
0173
0174 for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
0175 ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
0176 MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0);
0177 if (ret)
0178 return ret;
0179 }
0180
0181 ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
0182 MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
0183 if (ret)
0184 return ret;
0185
0186 ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU,
0187 MT_MCU_RING_SIZE, MT_TX_RING_BASE);
0188 if (ret)
0189 return ret;
0190
0191 ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
0192 MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
0193 if (ret)
0194 return ret;
0195
0196 ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
0197 MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
0198 if (ret)
0199 return ret;
0200
0201 mt7603_irq_enable(dev,
0202 MT_INT_TX_DONE(IEEE80211_AC_VO) |
0203 MT_INT_TX_DONE(IEEE80211_AC_VI) |
0204 MT_INT_TX_DONE(IEEE80211_AC_BE) |
0205 MT_INT_TX_DONE(IEEE80211_AC_BK) |
0206 MT_INT_TX_DONE(MT_TX_HW_QUEUE_MGMT) |
0207 MT_INT_TX_DONE(MT_TX_HW_QUEUE_MCU) |
0208 MT_INT_TX_DONE(MT_TX_HW_QUEUE_BCN) |
0209 MT_INT_TX_DONE(MT_TX_HW_QUEUE_BMC));
0210
0211 ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
0212 MT7603_MCU_RX_RING_SIZE, MT_RX_BUF_SIZE);
0213 if (ret)
0214 return ret;
0215
0216 ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
0217 MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE);
0218 if (ret)
0219 return ret;
0220
0221 mt76_wr(dev, MT_DELAY_INT_CFG, 0);
0222 ret = mt76_init_queues(dev, mt76_dma_rx_poll);
0223 if (ret)
0224 return ret;
0225
0226 netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
0227 mt7603_poll_tx);
0228 napi_enable(&dev->mt76.tx_napi);
0229
0230 return 0;
0231 }
0232
0233 void mt7603_dma_cleanup(struct mt7603_dev *dev)
0234 {
0235 mt76_clear(dev, MT_WPDMA_GLO_CFG,
0236 MT_WPDMA_GLO_CFG_TX_DMA_EN |
0237 MT_WPDMA_GLO_CFG_RX_DMA_EN |
0238 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
0239
0240 mt76_dma_cleanup(&dev->mt76);
0241 }